Compare commits
95 Commits
xen-api-v0
...
xo-server/
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6f32a89015 | ||
|
|
20d1b7c481 | ||
|
|
458609ed2e | ||
|
|
fcec8113f3 | ||
|
|
ebbd882ee4 | ||
|
|
0506e19a66 | ||
|
|
ecc62e4f54 | ||
|
|
2b95eb4e4d | ||
|
|
bcde9e0f74 | ||
|
|
114501ebc7 | ||
|
|
ebab7c0867 | ||
|
|
0e2270fb6e | ||
|
|
593493ec0c | ||
|
|
d92898a806 | ||
|
|
7890e46551 | ||
|
|
ef942a6209 | ||
|
|
fdde916388 | ||
|
|
31314d201b | ||
|
|
a29a949c51 | ||
|
|
cc1ce8c5f8 | ||
|
|
a21bf4ebe5 | ||
|
|
3d0420dbd9 | ||
|
|
04c74dd30f | ||
|
|
2f256291ae | ||
|
|
bcb66a4145 | ||
|
|
2d9368062e | ||
|
|
b110bacf61 | ||
|
|
78afdc0af5 | ||
|
|
ad6cd7985a | ||
|
|
a61661776d | ||
|
|
1a9ebddcab | ||
|
|
7ab907a854 | ||
|
|
68a34f7cdb | ||
|
|
da4ff3082d | ||
|
|
9c05a59b5f | ||
|
|
6780146505 | ||
|
|
2758833fc6 | ||
|
|
2786d7ec46 | ||
|
|
945a2006c9 | ||
|
|
b9e574e32f | ||
|
|
34f1ef1680 | ||
|
|
4ac4310bc1 | ||
|
|
a10997ca66 | ||
|
|
0e52a4c7dc | ||
|
|
a4b3e22c2b | ||
|
|
441bd7c754 | ||
|
|
ddbd32d1cb | ||
|
|
a5b0cbeaea | ||
|
|
c6f3b2b1ce | ||
|
|
3d869d9fa1 | ||
|
|
7a5229741f | ||
|
|
78e0c2d8fa | ||
|
|
5928984069 | ||
|
|
61a472f108 | ||
|
|
e45f78ea20 | ||
|
|
b3ae9d88eb | ||
|
|
f7f26537be | ||
|
|
96848fc6d4 | ||
|
|
51e6f0c79f | ||
|
|
4f94ad40b7 | ||
|
|
43e1eb9939 | ||
|
|
1f6d7de861 | ||
|
|
bd623c2daf | ||
|
|
40c71c2102 | ||
|
|
72a1580eff | ||
|
|
9e2404a0d7 | ||
|
|
7dd84d1518 | ||
|
|
d800db5d09 | ||
|
|
2714ccff38 | ||
|
|
1d493e411b | ||
|
|
2a0c222f2d | ||
|
|
641d68de0e | ||
|
|
2dd0fd660b | ||
|
|
bb5441c7bc | ||
|
|
eeea9e662b | ||
|
|
8d4874e240 | ||
|
|
a8ba4a1a8e | ||
|
|
0c027247ec | ||
|
|
164cb39c1b | ||
|
|
52503de645 | ||
|
|
83b8b5de61 | ||
|
|
3e326c4e62 | ||
|
|
a6b0690416 | ||
|
|
dcd007c5c7 | ||
|
|
eb090e4874 | ||
|
|
4b716584f7 | ||
|
|
4bc348f39f | ||
|
|
9c75992fe4 | ||
|
|
4bb2702ac5 | ||
|
|
ea8133cb41 | ||
|
|
fc40c7b03d | ||
|
|
7fe5b66fdb | ||
|
|
0f1d052493 | ||
|
|
56a182f795 | ||
|
|
e8da1b943b |
6
.gitignore
vendored
6
.gitignore
vendored
@@ -8,6 +8,10 @@
|
||||
/packages/*/dist/
|
||||
/packages/*/node_modules/
|
||||
|
||||
/@xen-orchestra/log/src/transports/index.js
|
||||
|
||||
/packages/vhd-cli/src/commands/index.js
|
||||
|
||||
/packages/xen-api/plot.dat
|
||||
|
||||
/packages/xo-server/.xo-server.*
|
||||
@@ -20,6 +24,8 @@
|
||||
/packages/xo-web/src/common/intl/locales/index.js
|
||||
/packages/xo-web/src/common/themes/index.js
|
||||
|
||||
/packages/xo-server-rework/src/app/mixins/index.js
|
||||
|
||||
npm-debug.log
|
||||
npm-debug.log.*
|
||||
pnpm-debug.log
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
language: node_js
|
||||
node_js:
|
||||
- stable
|
||||
#- stable # disable for now due to an issue of indirect dep upath with Node 9
|
||||
- 8
|
||||
- 6
|
||||
|
||||
@@ -12,6 +12,7 @@ addons:
|
||||
packages:
|
||||
- qemu-utils
|
||||
- blktap-utils
|
||||
- vmdk-stream-converter
|
||||
|
||||
before_install:
|
||||
- curl -o- -L https://yarnpkg.com/install.sh | bash
|
||||
|
||||
3
@xen-orchestra/async-fs/.babelrc.js
Normal file
3
@xen-orchestra/async-fs/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
@xen-orchestra/async-fs/.npmignore
Normal file
24
@xen-orchestra/async-fs/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
49
@xen-orchestra/async-fs/README.md
Normal file
49
@xen-orchestra/async-fs/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
**TODO**
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
47
@xen-orchestra/async-fs/package.json
Normal file
47
@xen-orchestra/async-fs/package.json
Normal file
@@ -0,0 +1,47 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/async-fs",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/async-fs",
|
||||
"bugs": "https://github.com/vatesfr/xo-web/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@isonoe.net"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {
|
||||
"promise-toolbox": "^0.9.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
10
@xen-orchestra/async-fs/src/index.js
Normal file
10
@xen-orchestra/async-fs/src/index.js
Normal file
@@ -0,0 +1,10 @@
|
||||
// @flow
|
||||
|
||||
import fs from 'fs'
|
||||
import { promisifyAll } from 'promise-toolbox'
|
||||
|
||||
const NOT_PROMISIFIABLE_RE = /^(?:[_A-Z]|exists$)|(?:Async|Stream|Sync)$/
|
||||
|
||||
module.exports = promisifyAll(fs, {
|
||||
mapper: name => !NOT_PROMISIFIABLE_RE.test(name) && name,
|
||||
})
|
||||
3
@xen-orchestra/async-map/.babelrc.js
Normal file
3
@xen-orchestra/async-map/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
@xen-orchestra/async-map/.npmignore
Normal file
24
@xen-orchestra/async-map/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
49
@xen-orchestra/async-map/README.md
Normal file
49
@xen-orchestra/async-map/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
**TODO**
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
50
@xen-orchestra/async-map/package.json
Normal file
50
@xen-orchestra/async-map/package.json
Normal file
@@ -0,0 +1,50 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/async-map",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/async-map",
|
||||
"bugs": "https://github.com/vatesfr/xo-web/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@isonoe.net"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
36
@xen-orchestra/async-map/src/index.js
Normal file
36
@xen-orchestra/async-map/src/index.js
Normal file
@@ -0,0 +1,36 @@
|
||||
// @flow
|
||||
|
||||
import { map } from 'lodash'
|
||||
|
||||
// Similar to map() + Promise.all() but wait for all promises to
|
||||
// settle before rejecting (with the first error)
|
||||
const asyncMap = <T1, T2>(
|
||||
collection: Array<T1> | Promise<Array<T1>>,
|
||||
iteratee: (value: T1, key: number, collection: Array<T1>) => T2
|
||||
): Promise<Array<T2>> => {
|
||||
if (!Array.isArray(collection)) {
|
||||
return collection.then(collection => asyncMap(collection, iteratee))
|
||||
}
|
||||
|
||||
let errorContainer
|
||||
const onError = error => {
|
||||
if (errorContainer === undefined) {
|
||||
errorContainer = { error }
|
||||
}
|
||||
}
|
||||
|
||||
return Promise.all(
|
||||
map(collection, (item, key, collection) =>
|
||||
new Promise(resolve => {
|
||||
resolve(iteratee(item, key, collection))
|
||||
}).catch(onError)
|
||||
)
|
||||
).then(values => {
|
||||
if (errorContainer !== undefined) {
|
||||
throw errorContainer.error
|
||||
}
|
||||
return values
|
||||
})
|
||||
}
|
||||
|
||||
export { asyncMap as default }
|
||||
@@ -7,34 +7,46 @@ const NODE_ENV = process.env.NODE_ENV || 'development'
|
||||
const __PROD__ = NODE_ENV === 'production'
|
||||
const __TEST__ = NODE_ENV === 'test'
|
||||
|
||||
const configs = {
|
||||
'@babel/plugin-proposal-decorators': {
|
||||
legacy: true,
|
||||
},
|
||||
'@babel/preset-env' (pkg) {
|
||||
return {
|
||||
debug: !__TEST__,
|
||||
loose: true,
|
||||
shippedProposals: true,
|
||||
targets: __PROD__
|
||||
? (() => {
|
||||
let node = (pkg.engines || {}).node
|
||||
if (node !== undefined) {
|
||||
const trimChars = '^=>~'
|
||||
while (trimChars.includes(node[0])) {
|
||||
node = node.slice(1)
|
||||
}
|
||||
return { node: node }
|
||||
}
|
||||
})()
|
||||
: { browsers: '', node: 'current' },
|
||||
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
const getConfig = (key, ...args) => {
|
||||
const config = configs[key]
|
||||
return config === undefined ? {} : typeof config === 'function' ? config(...args) : config
|
||||
}
|
||||
|
||||
module.exports = function (pkg, plugins, presets) {
|
||||
plugins === undefined && (plugins = {})
|
||||
|
||||
presets === undefined && (presets = {})
|
||||
presets['@babel/preset-env'] = {
|
||||
debug: !__TEST__,
|
||||
loose: true,
|
||||
shippedProposals: true,
|
||||
targets: __PROD__
|
||||
? (() => {
|
||||
let node = (pkg.engines || {}).node
|
||||
if (node !== undefined) {
|
||||
const trimChars = '^=>~'
|
||||
while (trimChars.includes(node[0])) {
|
||||
node = node.slice(1)
|
||||
}
|
||||
return { node: node }
|
||||
}
|
||||
})()
|
||||
: { browsers: '', node: 'current' },
|
||||
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
|
||||
}
|
||||
|
||||
Object.keys(pkg.devDependencies || {}).forEach(name => {
|
||||
if (!(name in presets) && PLUGINS_RE.test(name)) {
|
||||
plugins[name] = {}
|
||||
plugins[name] = getConfig(name, pkg)
|
||||
} else if (!(name in presets) && PRESETS_RE.test(name)) {
|
||||
presets[name] = {}
|
||||
presets[name] = getConfig(name, pkg)
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/babel-config",
|
||||
"version": "0.0.0",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/babel-config",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
"scheduling",
|
||||
"task"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -41,10 +41,10 @@
|
||||
"moment-timezone": "^0.5.14"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.44",
|
||||
"@babel/core": "7.0.0-beta.44",
|
||||
"@babel/preset-env": "7.0.0-beta.44",
|
||||
"@babel/preset-flow": "7.0.0-beta.44",
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
@@ -53,7 +53,7 @@
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
|
||||
3
@xen-orchestra/defined/.babelrc.js
Normal file
3
@xen-orchestra/defined/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
@xen-orchestra/defined/.npmignore
Normal file
24
@xen-orchestra/defined/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
49
@xen-orchestra/defined/README.md
Normal file
49
@xen-orchestra/defined/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
**TODO**
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
48
@xen-orchestra/defined/package.json
Normal file
48
@xen-orchestra/defined/package.json
Normal file
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/defined",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/defined",
|
||||
"bugs": "https://github.com/vatesfr/xo-web/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
65
@xen-orchestra/defined/src/index.js
Normal file
65
@xen-orchestra/defined/src/index.js
Normal file
@@ -0,0 +1,65 @@
|
||||
// @flow
|
||||
|
||||
// Usage:
|
||||
//
|
||||
// ```js
|
||||
// const httpProxy = defined(
|
||||
// process.env.HTTP_PROXY,
|
||||
// process.env.http_proxy
|
||||
// )
|
||||
//
|
||||
// const httpProxy = defined([
|
||||
// process.env.HTTP_PROXY,
|
||||
// process.env.http_proxy
|
||||
// ])
|
||||
// ```
|
||||
export default function defined () {
|
||||
let args = arguments
|
||||
let n = args.length
|
||||
if (n === 1) {
|
||||
args = arguments[0]
|
||||
n = args.length
|
||||
}
|
||||
|
||||
for (let i = 0; i < n; ++i) {
|
||||
let arg = arguments[i]
|
||||
if (typeof arg === 'function') {
|
||||
arg = get(arg)
|
||||
}
|
||||
if (arg !== undefined) {
|
||||
return arg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Usage:
|
||||
//
|
||||
// ```js
|
||||
// const friendName = get(() => props.user.friends[0].name)
|
||||
//
|
||||
// // this form can be used to avoid recreating functions:
|
||||
// const getFriendName = _ => _.friends[0].name
|
||||
// const friendName = get(getFriendName, props.user)
|
||||
// ```
|
||||
export const get = (accessor: (input: ?any) => any, arg: ?any) => {
|
||||
try {
|
||||
return accessor(arg)
|
||||
} catch (error) {
|
||||
if (!(error instanceof TypeError)) { // avoid hidding other errors
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Usage:
|
||||
//
|
||||
// ```js
|
||||
// const httpAgent = ifDef(
|
||||
// process.env.HTTP_PROXY,
|
||||
// _ => new ProxyAgent(_)
|
||||
// )
|
||||
// ```
|
||||
export const ifDef = (value: ?any, thenFn: (value: any) => any) =>
|
||||
value !== undefined
|
||||
? thenFn(value)
|
||||
: value
|
||||
3
@xen-orchestra/emit-async/.babelrc.js
Normal file
3
@xen-orchestra/emit-async/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
@xen-orchestra/emit-async/.npmignore
Normal file
24
@xen-orchestra/emit-async/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
49
@xen-orchestra/emit-async/README.md
Normal file
49
@xen-orchestra/emit-async/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
**TODO**
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
48
@xen-orchestra/emit-async/package.json
Normal file
48
@xen-orchestra/emit-async/package.json
Normal file
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/emit-async",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/emit-async",
|
||||
"bugs": "https://github.com/vatesfr/xo-web/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
24
@xen-orchestra/emit-async/src/index.js
Normal file
24
@xen-orchestra/emit-async/src/index.js
Normal file
@@ -0,0 +1,24 @@
|
||||
export default function emitAsync (event) {
|
||||
let opts
|
||||
let i = 1
|
||||
|
||||
// an option object has been passed as first param
|
||||
if (typeof event !== 'string') {
|
||||
opts = event
|
||||
event = arguments[i++]
|
||||
}
|
||||
|
||||
const n = arguments.length - i
|
||||
const args = new Array(n)
|
||||
for (let j = 0; j < n; ++j) {
|
||||
args[j] = arguments[j + i]
|
||||
}
|
||||
|
||||
const onError = opts != null && opts.onError
|
||||
|
||||
return Promise.all(this.listeners(event).map(
|
||||
listener => new Promise(resolve => {
|
||||
resolve(listener.apply(this, args))
|
||||
}).catch(onError)
|
||||
))
|
||||
}
|
||||
3
@xen-orchestra/fs/.babelrc.js
Normal file
3
@xen-orchestra/fs/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
54
@xen-orchestra/fs/package.json
Normal file
54
@xen-orchestra/fs/package.json
Normal file
@@ -0,0 +1,54 @@
|
||||
{
|
||||
"name": "@xen-orchestra/fs",
|
||||
"version": "0.0.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "The File System for Xen Orchestra backups.",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"preferGlobal": true,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/runtime": "^7.0.0-beta.44",
|
||||
"@marsaud/smb2-promise": "^0.2.1",
|
||||
"execa": "^0.10.0",
|
||||
"fs-extra": "^5.0.0",
|
||||
"get-stream": "^3.0.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.9.5",
|
||||
"through2": "^2.0.3",
|
||||
"tmp": "^0.0.33",
|
||||
"xo-remote-parser": "^0.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.44",
|
||||
"@babel/core": "7.0.0-beta.44",
|
||||
"@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
|
||||
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
|
||||
"@babel/preset-env": "7.0.0-beta.44",
|
||||
"@babel/preset-flow": "7.0.0-beta.44",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepare": "yarn run build"
|
||||
}
|
||||
}
|
||||
@@ -1,11 +1,11 @@
|
||||
// @flow
|
||||
|
||||
import getStream from 'get-stream'
|
||||
import { randomBytes } from 'crypto'
|
||||
import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
|
||||
import { type Readable, type Writable } from 'stream'
|
||||
import { fromEvent, ignoreErrors } from 'promise-toolbox'
|
||||
import { parse } from 'xo-remote-parser'
|
||||
|
||||
import { getPseudoRandomBytes, streamToBuffer } from '../utils'
|
||||
|
||||
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
|
||||
|
||||
type Data = Buffer | Readable | string
|
||||
@@ -54,7 +54,7 @@ export default class RemoteHandlerAbstract {
|
||||
|
||||
async test (): Promise<Object> {
|
||||
const testFileName = `${Date.now()}.test`
|
||||
const data = getPseudoRandomBytes(1024 * 1024)
|
||||
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
|
||||
let step = 'write'
|
||||
try {
|
||||
await this.outputFile(testFileName, data)
|
||||
@@ -97,7 +97,7 @@ export default class RemoteHandlerAbstract {
|
||||
}
|
||||
|
||||
_readFile (file: string, options?: Object): Promise<Buffer> {
|
||||
return this.createReadStream(file, options).then(streamToBuffer)
|
||||
return this.createReadStream(file, options).then(getStream.buffer)
|
||||
}
|
||||
|
||||
async rename (
|
||||
26
@xen-orchestra/fs/src/fs.integ.spec.js
Normal file
26
@xen-orchestra/fs/src/fs.integ.spec.js
Normal file
@@ -0,0 +1,26 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import rimraf from 'rimraf'
|
||||
import tmp from 'tmp'
|
||||
|
||||
import { fromCallback as pFromCallback } from 'promise-toolbox'
|
||||
import { getHandler } from '.'
|
||||
|
||||
const initialDir = process.cwd()
|
||||
|
||||
beforeEach(async () => {
|
||||
const dir = await pFromCallback(cb => tmp.dir(cb))
|
||||
process.chdir(dir)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
const tmpDir = process.cwd()
|
||||
process.chdir(initialDir)
|
||||
await pFromCallback(cb => rimraf(tmpDir, cb))
|
||||
})
|
||||
|
||||
test("fs test doesn't crash", async () => {
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const result = await handler.test()
|
||||
expect(result.success).toBeTruthy()
|
||||
})
|
||||
@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
|
||||
import RemoteHandlerNfs from './nfs'
|
||||
import RemoteHandlerSmb from './smb'
|
||||
|
||||
export type { default as RemoteHandler } from './abstract'
|
||||
export type Remote = { url: string }
|
||||
|
||||
const HANDLERS = {
|
||||
@@ -1,7 +1,9 @@
|
||||
import Smb2 from '@marsaud/smb2-promise'
|
||||
import { lastly as pFinally } from 'promise-toolbox'
|
||||
|
||||
import RemoteHandlerAbstract from './abstract'
|
||||
import { noop, pFinally } from '../utils'
|
||||
|
||||
const noop = () => {}
|
||||
|
||||
// Normalize the error code for file not found.
|
||||
const normalizeError = error => {
|
||||
3
@xen-orchestra/log/.babelrc.js
Normal file
3
@xen-orchestra/log/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
@xen-orchestra/log/.npmignore
Normal file
24
@xen-orchestra/log/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
141
@xen-orchestra/log/README.md
Normal file
141
@xen-orchestra/log/README.md
Normal file
@@ -0,0 +1,141 @@
|
||||
# @xen-orchestra/log [](https://travis-ci.org/vatesfr/xen-orchestra)
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/log):
|
||||
|
||||
```
|
||||
> npm install --save @xen-orchestra/log
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Everywhere something should be logged:
|
||||
|
||||
```js
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
|
||||
const log = createLogger('xo-server-api')
|
||||
log.warn('foo')
|
||||
```
|
||||
|
||||
Then at application level you can choose how to handle these logs:
|
||||
|
||||
```js
|
||||
import { configure, transports } from '@xen-orchestra/log'
|
||||
|
||||
configure([
|
||||
{
|
||||
// if filter is a string, then it is pattern
|
||||
// (https://github.com/visionmedia/debug#wildcards) which is
|
||||
// matched against the namespace of the logs
|
||||
filter: process.env.DEBUG,
|
||||
|
||||
transport: transports.console()
|
||||
},
|
||||
{
|
||||
// only levels >= warn
|
||||
level: 'warn',
|
||||
|
||||
transport: transports.email({
|
||||
service: 'gmail',
|
||||
auth: {
|
||||
user: 'jane.smith@gmail.com',
|
||||
pass: 'H&NbECcpXF|pyXe#%ZEb'
|
||||
},
|
||||
from: 'jane.smith@gmail.com',
|
||||
to: [
|
||||
'jane.smith@gmail.com',
|
||||
'sam.doe@yahoo.com'
|
||||
]
|
||||
})
|
||||
}
|
||||
])
|
||||
```
|
||||
|
||||
### Transports
|
||||
|
||||
#### Console
|
||||
|
||||
```js
|
||||
configure(transports.console())
|
||||
```
|
||||
|
||||
#### Email
|
||||
|
||||
Optional dependency:
|
||||
|
||||
```
|
||||
> yarn add nodemailer pretty-format
|
||||
```
|
||||
|
||||
Configuration:
|
||||
|
||||
```js
|
||||
configure(transports.email({
|
||||
service: 'gmail',
|
||||
auth: {
|
||||
user: 'jane.smith@gmail.com',
|
||||
pass: 'H&NbECcpXF|pyXe#%ZEb'
|
||||
},
|
||||
from: 'jane.smith@gmail.com',
|
||||
to: [
|
||||
'jane.smith@gmail.com',
|
||||
'sam.doe@yahoo.com'
|
||||
]
|
||||
}))
|
||||
```
|
||||
|
||||
#### Syslog
|
||||
|
||||
Optional dependency:
|
||||
|
||||
```
|
||||
> yarn add split-host syslog-client
|
||||
```
|
||||
|
||||
Configuration:
|
||||
|
||||
```js
|
||||
// By default, log to udp://localhost:514
|
||||
configure(transports.syslog())
|
||||
|
||||
// But TCP, a different host, or a different port can be used
|
||||
configure(transports.syslog('tcp://syslog.company.lan'))
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xo-web/issues/)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](https://vates.fr)
|
||||
1
@xen-orchestra/log/configure.js
Symbolic link
1
@xen-orchestra/log/configure.js
Symbolic link
@@ -0,0 +1 @@
|
||||
dist/configure.js
|
||||
53
@xen-orchestra/log/package.json
Normal file
53
@xen-orchestra/log/package.json
Normal file
@@ -0,0 +1,53 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/log",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/log",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/polyfill": "7.0.0-beta.46",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.9.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "index-modules --cjs-lazy src/transports && cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "index-modules --cjs-lazy src/transports && cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
94
@xen-orchestra/log/src/configure.js
Normal file
94
@xen-orchestra/log/src/configure.js
Normal file
@@ -0,0 +1,94 @@
|
||||
import createConsoleTransport from './transports/console'
|
||||
import LEVELS, { resolve } from './levels'
|
||||
import { compileGlobPattern } from './utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const createTransport = config => {
|
||||
if (typeof config === 'function') {
|
||||
return config
|
||||
}
|
||||
|
||||
if (Array.isArray(config)) {
|
||||
const transports = config.map(createTransport)
|
||||
const { length } = transports
|
||||
return function () {
|
||||
for (let i = 0; i < length; ++i) {
|
||||
transports[i].apply(this, arguments)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let { filter, transport } = config
|
||||
const level = resolve(config.level)
|
||||
|
||||
if (filter !== undefined) {
|
||||
if (typeof filter === 'string') {
|
||||
const re = compileGlobPattern(filter)
|
||||
filter = log => re.test(log.namespace)
|
||||
}
|
||||
|
||||
const orig = transport
|
||||
transport = function (log) {
|
||||
if ((level !== undefined && log.level >= level) || filter(log)) {
|
||||
return orig.apply(this, arguments)
|
||||
}
|
||||
}
|
||||
} else if (level !== undefined) {
|
||||
const orig = transport
|
||||
transport = function (log) {
|
||||
if (log.level >= level) {
|
||||
return orig.apply(this, arguments)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return transport
|
||||
}
|
||||
|
||||
let transport = createTransport({
|
||||
// display warnings or above, and all that are enabled via DEBUG or
|
||||
// NODE_DEBUG env
|
||||
filter: process.env.DEBUG || process.env.NODE_DEBUG,
|
||||
level: LEVELS.INFO,
|
||||
|
||||
transport: createConsoleTransport(),
|
||||
})
|
||||
|
||||
export const configure = config => {
|
||||
transport = createTransport(config)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export const catchGlobalErrors = logger => {
|
||||
// patch process
|
||||
const onUncaughtException = error => {
|
||||
logger.error('uncaught exception', { error })
|
||||
}
|
||||
const onUnhandledRejection = error => {
|
||||
logger.warn('possibly unhandled rejection', { error })
|
||||
}
|
||||
process.on('uncaughtException', onUncaughtException)
|
||||
process.on('unhandledRejection', onUnhandledRejection)
|
||||
|
||||
// patch EventEmitter
|
||||
const EventEmitter = require('events')
|
||||
const { prototype } = EventEmitter
|
||||
const { emit } = prototype
|
||||
function patchedEmit (event, error) {
|
||||
event === 'error' && !this.listenerCount(event)
|
||||
? logger.error('unhandled error event', { error })
|
||||
: emit.apply(this, arguments)
|
||||
}
|
||||
prototype.emit = patchedEmit
|
||||
|
||||
return () => {
|
||||
process.removeListener('uncaughtException', onUncaughtException)
|
||||
process.removeListener('unhandledRejection', onUnhandledRejection)
|
||||
|
||||
if (prototype.emit === patchedEmit) {
|
||||
prototype.emit = emit
|
||||
}
|
||||
}
|
||||
}
|
||||
62
@xen-orchestra/log/src/index.js
Normal file
62
@xen-orchestra/log/src/index.js
Normal file
@@ -0,0 +1,62 @@
|
||||
import createTransport from './transports/console'
|
||||
import LEVELS from './levels'
|
||||
|
||||
const symbol = typeof Symbol !== 'undefined' ? Symbol.for('@xen-orchestra/log') : '@@@xen-orchestra/log'
|
||||
if (!(symbol in global)) {
|
||||
// the default behavior, without requiring `configure` is to avoid
|
||||
// logging anything unless it's a real error
|
||||
const transport = createTransport()
|
||||
global[symbol] = log => log.level > LEVEL.WARN && transport(log)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
function Log (data, level, namespace, message, time) {
|
||||
this.data = data
|
||||
this.level = level
|
||||
this.namespace = namespace
|
||||
this.message = message
|
||||
this.time = time
|
||||
}
|
||||
|
||||
function Logger (namespace) {
|
||||
this._namespace = namespace
|
||||
|
||||
// bind all logging methods
|
||||
for (const name in LEVELS) {
|
||||
const lowerCase = name.toLowerCase()
|
||||
this[lowerCase] = this[lowerCase].bind(this)
|
||||
}
|
||||
}
|
||||
|
||||
const { prototype } = Logger
|
||||
|
||||
for (const name in LEVELS) {
|
||||
const level = LEVELS[name]
|
||||
|
||||
prototype[name.toLowerCase()] = function (message, data) {
|
||||
global[symbol](new Log(data, level, this._namespace, message, new Date()))
|
||||
}
|
||||
}
|
||||
|
||||
prototype.wrap = function (message, fn) {
|
||||
const logger = this
|
||||
const warnAndRethrow = error => {
|
||||
logger.warn(message, { error })
|
||||
throw error
|
||||
}
|
||||
return function () {
|
||||
try {
|
||||
const result = fn.apply(this, arguments)
|
||||
const then = result != null && result.then
|
||||
return typeof then === 'function'
|
||||
? then.call(result, warnAndRethrow)
|
||||
: result
|
||||
} catch (error) {
|
||||
warnAndRethrow(error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const createLogger = namespace => new Logger(namespace)
|
||||
export { createLogger }
|
||||
24
@xen-orchestra/log/src/levels.js
Normal file
24
@xen-orchestra/log/src/levels.js
Normal file
@@ -0,0 +1,24 @@
|
||||
const LEVELS = Object.create(null)
|
||||
export { LEVELS as default }
|
||||
|
||||
// https://github.com/trentm/node-bunyan#levels
|
||||
LEVELS.FATAL = 60 // service/app is going to down
|
||||
LEVELS.ERROR = 50 // fatal for current action
|
||||
LEVELS.WARN = 40 // something went wrong but it's not fatal
|
||||
LEVELS.INFO = 30 // detail on unusual but normal operation
|
||||
LEVELS.DEBUG = 20
|
||||
|
||||
export const NAMES = Object.create(null)
|
||||
for (const name in LEVELS) {
|
||||
NAMES[LEVELS[name]] = name
|
||||
}
|
||||
|
||||
export const resolve = level => {
|
||||
if (typeof level === 'string') {
|
||||
level = LEVELS[level.toUpperCase()]
|
||||
}
|
||||
return level
|
||||
}
|
||||
|
||||
Object.freeze(LEVELS)
|
||||
Object.freeze(NAMES)
|
||||
32
@xen-orchestra/log/src/levels.spec.js
Normal file
32
@xen-orchestra/log/src/levels.spec.js
Normal file
@@ -0,0 +1,32 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { forEach, isInteger } from 'lodash'
|
||||
|
||||
import LEVELS, { NAMES, resolve } from './levels'
|
||||
|
||||
describe('LEVELS', () => {
|
||||
it('maps level names to their integer values', () => {
|
||||
forEach(LEVELS, (value, name) => {
|
||||
expect(isInteger(value)).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('NAMES', () => {
|
||||
it('maps level values to their names', () => {
|
||||
forEach(LEVELS, (value, name) => {
|
||||
expect(NAMES[value]).toBe(name)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('resolve()', () => {
|
||||
it('returns level values either from values or names', () => {
|
||||
forEach(LEVELS, value => {
|
||||
expect(resolve(value)).toBe(value)
|
||||
})
|
||||
forEach(NAMES, (name, value) => {
|
||||
expect(resolve(name)).toBe(+value)
|
||||
})
|
||||
})
|
||||
})
|
||||
20
@xen-orchestra/log/src/transports/console.js
Normal file
20
@xen-orchestra/log/src/transports/console.js
Normal file
@@ -0,0 +1,20 @@
|
||||
import LEVELS, { NAMES } from '../levels'
|
||||
|
||||
// Bind console methods (necessary for browsers)
|
||||
const debugConsole = console.log.bind(console)
|
||||
const infoConsole = console.info.bind(console)
|
||||
const warnConsole = console.warn.bind(console)
|
||||
const errorConsole = console.error.bind(console)
|
||||
|
||||
const { ERROR, INFO, WARN } = LEVELS
|
||||
|
||||
const consoleTransport = ({ data, level, namespace, message, time }) => {
|
||||
const fn =
|
||||
level < INFO
|
||||
? debugConsole
|
||||
: level < WARN ? infoConsole : level < ERROR ? warnConsole : errorConsole
|
||||
|
||||
fn('%s - %s - [%s] %s', time.toISOString(), namespace, NAMES[level], message)
|
||||
data != null && fn(data)
|
||||
}
|
||||
export default () => consoleTransport
|
||||
68
@xen-orchestra/log/src/transports/email.js
Normal file
68
@xen-orchestra/log/src/transports/email.js
Normal file
@@ -0,0 +1,68 @@
|
||||
import prettyFormat from 'pretty-format' // eslint-disable-line node/no-extraneous-import
|
||||
import { createTransport } from 'nodemailer' // eslint-disable-line node/no-extraneous-import
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
|
||||
import { evalTemplate, required } from '../utils'
|
||||
import { NAMES } from '../levels'
|
||||
|
||||
export default ({
|
||||
// transport options (https://nodemailer.com/smtp/)
|
||||
auth,
|
||||
authMethod,
|
||||
host,
|
||||
ignoreTLS,
|
||||
port,
|
||||
proxy,
|
||||
requireTLS,
|
||||
secure,
|
||||
service,
|
||||
tls,
|
||||
|
||||
// message options (https://nodemailer.com/message/)
|
||||
bcc,
|
||||
cc,
|
||||
from = required('from'),
|
||||
to = required('to'),
|
||||
subject = '[{{level}} - {{namespace}}] {{time}} {{message}}',
|
||||
}) => {
|
||||
const transporter = createTransport(
|
||||
{
|
||||
auth,
|
||||
authMethod,
|
||||
host,
|
||||
ignoreTLS,
|
||||
port,
|
||||
proxy,
|
||||
requireTLS,
|
||||
secure,
|
||||
service,
|
||||
tls,
|
||||
|
||||
disableFileAccess: true,
|
||||
disableUrlAccess: true,
|
||||
},
|
||||
{
|
||||
bcc,
|
||||
cc,
|
||||
from,
|
||||
to,
|
||||
}
|
||||
)
|
||||
|
||||
return log =>
|
||||
fromCallback(cb =>
|
||||
transporter.sendMail(
|
||||
{
|
||||
subject: evalTemplate(
|
||||
subject,
|
||||
key =>
|
||||
key === 'level'
|
||||
? NAMES[log.level]
|
||||
: key === 'time' ? log.time.toISOString() : log[key]
|
||||
),
|
||||
text: prettyFormat(log.data),
|
||||
},
|
||||
cb
|
||||
)
|
||||
)
|
||||
}
|
||||
7
@xen-orchestra/log/src/transports/memory.js
Normal file
7
@xen-orchestra/log/src/transports/memory.js
Normal file
@@ -0,0 +1,7 @@
|
||||
export default () => {
|
||||
const memoryLogger = log => {
|
||||
logs.push(log)
|
||||
}
|
||||
const logs = (memoryLogger.logs = [])
|
||||
return memoryLogger
|
||||
}
|
||||
42
@xen-orchestra/log/src/transports/syslog.js
Normal file
42
@xen-orchestra/log/src/transports/syslog.js
Normal file
@@ -0,0 +1,42 @@
|
||||
import splitHost from 'split-host' // eslint-disable-line node/no-extraneous-import node/no-missing-import
|
||||
import { createClient, Facility, Severity, Transport } from 'syslog-client' // eslint-disable-line node/no-extraneous-import node/no-missing-import
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
import { startsWith } from 'lodash'
|
||||
|
||||
import LEVELS from '../levels'
|
||||
|
||||
// https://github.com/paulgrove/node-syslog-client#syslogseverity
|
||||
const LEVEL_TO_SEVERITY = {
|
||||
[LEVELS.FATAL]: Severity.Critical,
|
||||
[LEVELS.ERROR]: Severity.Error,
|
||||
[LEVELS.WARN]: Severity.Warning,
|
||||
[LEVELS.INFO]: Severity.Informational,
|
||||
[LEVELS.DEBUG]: Severity.Debug,
|
||||
}
|
||||
|
||||
const facility = Facility.User
|
||||
|
||||
export default target => {
|
||||
const opts = {}
|
||||
if (target !== undefined) {
|
||||
if (startsWith(target, 'tcp://')) {
|
||||
target = target.slice(6)
|
||||
opts.transport = Transport.Tcp
|
||||
} else if (startsWith(target, 'udp://')) {
|
||||
target = target.slice(6)
|
||||
opts.transport = Transport.Ucp
|
||||
}
|
||||
|
||||
({ host: target, port: opts.port } = splitHost(target))
|
||||
}
|
||||
|
||||
const client = createClient(target, opts)
|
||||
|
||||
return log =>
|
||||
fromCallback(cb =>
|
||||
client.log(log.message, {
|
||||
facility,
|
||||
severity: LEVEL_TO_SEVERITY[log.level],
|
||||
})
|
||||
)
|
||||
}
|
||||
64
@xen-orchestra/log/src/utils.js
Normal file
64
@xen-orchestra/log/src/utils.js
Normal file
@@ -0,0 +1,64 @@
|
||||
import escapeRegExp from 'lodash/escapeRegExp'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const TPL_RE = /\{\{(.+?)\}\}/g
|
||||
export const evalTemplate = (tpl, data) => {
|
||||
const getData =
|
||||
typeof data === 'function'
|
||||
? (_, key) => data(key)
|
||||
: (_, key) => data[key]
|
||||
|
||||
return tpl.replace(TPL_RE, getData)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const compileGlobPatternFragment = pattern =>
|
||||
pattern
|
||||
.split('*')
|
||||
.map(escapeRegExp)
|
||||
.join('.*')
|
||||
|
||||
export const compileGlobPattern = pattern => {
|
||||
const no = []
|
||||
const yes = []
|
||||
pattern.split(/[\s,]+/).forEach(pattern => {
|
||||
if (pattern[0] === '-') {
|
||||
no.push(pattern.slice(1))
|
||||
} else {
|
||||
yes.push(pattern)
|
||||
}
|
||||
})
|
||||
|
||||
const raw = ['^']
|
||||
|
||||
if (no.length !== 0) {
|
||||
raw.push('(?!', no.map(compileGlobPatternFragment).join('|'), ')')
|
||||
}
|
||||
|
||||
if (yes.length !== 0) {
|
||||
raw.push('(?:', yes.map(compileGlobPatternFragment).join('|'), ')')
|
||||
} else {
|
||||
raw.push('.*')
|
||||
}
|
||||
|
||||
raw.push('$')
|
||||
|
||||
return new RegExp(raw.join(''))
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export const required = name => {
|
||||
throw new Error(`missing required arg ${name}`)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export const serializeError = error => ({
|
||||
...error,
|
||||
message: error.message,
|
||||
name: error.name,
|
||||
stack: error.stack,
|
||||
})
|
||||
13
@xen-orchestra/log/src/utils.spec.js
Normal file
13
@xen-orchestra/log/src/utils.spec.js
Normal file
@@ -0,0 +1,13 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { compileGlobPattern } from './utils'
|
||||
|
||||
describe('compileGlobPattern()', () => {
|
||||
it('works', () => {
|
||||
const re = compileGlobPattern('foo, ba*, -bar')
|
||||
expect(re.test('foo')).toBe(true)
|
||||
expect(re.test('bar')).toBe(false)
|
||||
expect(re.test('baz')).toBe(true)
|
||||
expect(re.test('qux')).toBe(false)
|
||||
})
|
||||
})
|
||||
1
@xen-orchestra/log/transports
Symbolic link
1
@xen-orchestra/log/transports
Symbolic link
@@ -0,0 +1 @@
|
||||
dist/transports
|
||||
3
@xen-orchestra/mixin/.babelrc.js
Normal file
3
@xen-orchestra/mixin/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
@xen-orchestra/mixin/.npmignore
Normal file
24
@xen-orchestra/mixin/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
49
@xen-orchestra/mixin/README.md
Normal file
49
@xen-orchestra/mixin/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
**TODO**
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
50
@xen-orchestra/mixin/package.json
Normal file
50
@xen-orchestra/mixin/package.json
Normal file
@@ -0,0 +1,50 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/mixin",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/mixin",
|
||||
"bugs": "https://github.com/vatesfr/xo-web/issues",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {
|
||||
"bind-property-descriptor": "^1.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
}
|
||||
128
@xen-orchestra/mixin/src/index.js
Normal file
@@ -0,0 +1,128 @@
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'

// ===================================================================

const { defineProperties, getOwnPropertyDescriptor } = Object

const isIgnoredProperty = name => name[0] === '_' || name === 'constructor'

const IGNORED_STATIC_PROPERTIES = {
  __proto__: null,

  arguments: true,
  caller: true,
  length: true,
  name: true,
  prototype: true,
}
const isIgnoredStaticProperty = name => name in IGNORED_STATIC_PROPERTIES

const ownKeys =
  (typeof Reflect !== 'undefined' && Reflect.ownKeys) ||
  (({ getOwnPropertyNames: names, getOwnPropertySymbols: symbols }) =>
    symbols !== undefined ? obj => names(obj).concat(symbols(obj)) : names)(Object)

// -------------------------------------------------------------------

const mixin = Mixins => Class => {
  if (__DEV__ && !Array.isArray(Mixins)) {
    throw new TypeError('Mixins should be an array')
  }

  const { name } = Class

  // Copy properties of plain object mix-ins to the prototype.
  {
    const allMixins = Mixins
    Mixins = []
    const { prototype } = Class
    const descriptors = { __proto__: null }
    allMixins.forEach(Mixin => {
      if (typeof Mixin === 'function') {
        Mixins.push(Mixin)
        return
      }

      for (const prop of ownKeys(Mixin)) {
        if (__DEV__ && prop in prototype) {
          throw new Error(`${name}#${prop} is already defined`)
        }

        ;(descriptors[prop] = getOwnPropertyDescriptor(
          Mixin,
          prop
        )).enumerable = false // Object methods are enumerable but class methods are not.
      }
    })
    defineProperties(prototype, descriptors)
  }

  const n = Mixins.length

  function DecoratedClass (...args) {
    const instance = new Class(...args)

    for (let i = 0; i < n; ++i) {
      const Mixin = Mixins[i]
      const { prototype } = Mixin
      const mixinInstance = new Mixin(instance, ...args)
      const descriptors = { __proto__: null }
      const props = ownKeys(prototype)
      for (let j = 0, m = props.length; j < m; ++j) {
        const prop = props[j]

        if (isIgnoredProperty(prop)) {
          continue
        }

        if (prop in instance) {
          throw new Error(`${name}#${prop} is already defined`)
        }

        descriptors[prop] = getBoundPropertyDescriptor(
          prototype,
          prop,
          mixinInstance
        )
      }
      defineProperties(instance, descriptors)
    }

    return instance
  }

  // Copy original and mixed-in static properties on Decorator class.
  const descriptors = { __proto__: null }
  ownKeys(Class).forEach(prop => {
    let descriptor
    if (!(
      // Special properties are not defined...
      isIgnoredStaticProperty(prop) &&

      // if they already exist...
      (descriptor = getOwnPropertyDescriptor(DecoratedClass, prop)) !== undefined &&

      // and are not configurable.
      !descriptor.configurable
    )) {
      descriptors[prop] = getOwnPropertyDescriptor(Class, prop)
    }
  })
  Mixins.forEach(Mixin => {
    ownKeys(Mixin).forEach(prop => {
      if (isIgnoredStaticProperty(prop)) {
        return
      }

      if (__DEV__ && prop in descriptors) {
        throw new Error(`${name}.${prop} is already defined`)
      }

      descriptors[prop] = getOwnPropertyDescriptor(Mixin, prop)
    })
  })
  defineProperties(DecoratedClass, descriptors)

  return DecoratedClass
}
export { mixin as default }
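A minimal usage sketch of the decorator above (not part of the diff; class and property names are invented, and it assumes the package is built so that `__DEV__` has been substituted):

import mixin from '@xen-orchestra/mixin'

// A mix-in class: instantiated once per decorated instance, which it receives as first argument.
class Logger {
  constructor (app) {
    this._app = app
  }

  log (message) {
    console.log('[%s]', this._app.constructor.name, message)
  }
}

// Plain objects in the array have their properties copied onto the prototype.
const App = mixin([Logger, { version: '0.0.0' }])(class App {})

const app = new App()
app.log('started') // delegated to the Logger instance bound to this app
console.log(app.version) // '0.0.0'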
102
CHANGELOG.md
102
CHANGELOG.md
@@ -1,9 +1,104 @@
|
||||
# ChangeLog
|
||||
|
||||
## **5.20.0** (planned 2018-05-31)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
|
||||
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
|
||||
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
|
||||
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
|
||||
|
||||
### Bugs
|
||||
|
||||
## **5.19.0** (2018-05-01)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
|
||||
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
|
||||
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
|
||||
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
|
||||
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
|
||||
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
|
||||
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
|
||||
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
|
||||
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
|
||||
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
|
||||
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
|
||||
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
|
||||
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
|
||||
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
|
||||
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
|
||||
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
|
||||
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
|
||||
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
|
||||
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
|
||||
|
||||
### Bugs
|
||||
|
||||
- Allowed-ips don't works displaying index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
|
||||
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
|
||||
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
|
||||
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
|
||||
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
|
||||
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
|
||||
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
|
||||
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
|
||||
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
|
||||
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
|
||||
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
|
||||
|
||||
## **5.18.0** (2018-03-31)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
|
||||
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
|
||||
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
|
||||
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
|
||||
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
|
||||
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
|
||||
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
|
||||
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
|
||||
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
|
||||
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
|
||||
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
|
||||
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
|
||||
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
|
||||
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
|
||||
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
|
||||
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
|
||||
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
|
||||
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
|
||||
- UI Enhancement - VM list - Allways show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
|
||||
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
|
||||
|
||||
|
||||
### Bugs
|
||||
|
||||
- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
|
||||
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
|
||||
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
|
||||
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
|
||||
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
|
||||
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
|
||||
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
|
||||
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
|
||||
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
|
||||
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
|
||||
- [Basic backup] Continous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
|
||||
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
|
||||
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
|
||||
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
|
||||
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
|
||||
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
|
||||
|
||||
## **5.17.0** (2018-03-02)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
|
||||
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
|
||||
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
|
||||
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
|
||||
@@ -22,6 +117,9 @@
|
||||
- Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
|
||||
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
|
||||
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
|
||||
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
|
||||
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
|
||||
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
|
||||
|
||||
### Bugs
|
||||
|
||||
@@ -41,6 +139,7 @@
|
||||
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
|
||||
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
|
||||
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
|
||||
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
|
||||
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
|
||||
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
|
||||
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
|
||||
@@ -50,6 +149,8 @@
|
||||
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
|
||||
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
|
||||
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
|
||||
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
|
||||
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
|
||||
|
||||
### Bugs
|
||||
|
||||
@@ -79,6 +180,7 @@
|
||||
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
|
||||
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
|
||||
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
|
||||
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
|
||||
|
||||
### Bugs
|
||||
|
||||
|
||||
8
flow-typed/lodash.js
vendored
@@ -1,4 +1,12 @@
declare module 'lodash' {
  declare export function forEach<K, V>(
    object: { [K]: V },
    iteratee: (V, K) => void
  ): void
  declare export function groupBy<K, V>(
    object: { [K]: V },
    iteratee: K | ((V, K) => string)
  ): { [string]: V[] }
  declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
  declare export function isEmpty(mixed): boolean
  declare export function keyBy<T>(array: T[], iteratee: string): boolean

3
flow-typed/promise-toolbox.js
vendored
@@ -5,6 +5,9 @@ declare module 'promise-toolbox' {
    reject: T => void,
    resolve: T => void,
  |}
  declare export function fromCallback<T>(
    (cb: (error: any, value: T) => void) => void
  ): Promise<T>
  declare export function fromEvent(emitter: mixed, string): Promise<mixed>
  declare export function ignoreErrors(): Promise<void>
  declare export function timeout<T>(delay: number): Promise<T>
@@ -1,6 +1,6 @@
{
  "devDependencies": {
    "@babel/register": "^7.0.0-beta.44",
    "@babel/register": "^7.0.0-beta.46",
    "babel-7-jest": "^21.3.2",
    "babel-eslint": "^8.1.2",
    "benchmark": "^2.1.4",
@@ -34,16 +34,19 @@
    "testEnvironment": "node",
    "testPathIgnorePatterns": [
      "/dist/",
      "/xo-vmdk-to-vhd/",
      "/xo-web/"
    ],
    "testRegex": "\\.spec\\.js$",
    "transform": {
      "/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
      "/@xen-orchestra/fs/.+\\.jsx?$": "babel-7-jest",
      "/@xen-orchestra/log/.+\\.jsx?$": "babel-7-jest",
      "/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
      "/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
      "/packages/vhd-lib/.+\\.jsx?$": "babel-7-jest",
      "/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
      "/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
      "/packages/xo-vmdk-to-vhd/.+\\.jsx?$": "babel-7-jest",
      "\\.jsx?$": "babel-jest"
    }
  },

@@ -30,9 +30,9 @@
    "lodash": "^4.17.4"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/cli": "7.0.0-beta.46",
    "@babel/core": "7.0.0-beta.46",
    "@babel/preset-env": "7.0.0-beta.46",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.1",
    "rimraf": "^2.6.2"

@@ -28,10 +28,10 @@
  },
  "dependencies": {},
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "@babel/cli": "7.0.0-beta.46",
    "@babel/core": "7.0.0-beta.46",
    "@babel/preset-env": "7.0.0-beta.46",
    "@babel/preset-flow": "7.0.0-beta.46",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"
  },
@@ -1,6 +1,6 @@
{
  "name": "vhd-cli",
  "version": "0.0.0",
  "version": "0.0.1",
  "license": "ISC",
  "description": "",
  "keywords": [],
@@ -26,10 +26,11 @@
    "node": ">=4"
  },
  "dependencies": {
    "struct-fu": "^1.2.0",
    "@nraynaud/xo-fs": "^0.0.5",
    "@xen-orchestra/fs": "^0.0.0",
    "babel-runtime": "^6.22.0",
    "exec-promise": "^0.7.0"
    "exec-promise": "^0.7.0",
    "struct-fu": "^1.2.0",
    "vhd-lib": "^0.0.0"
  },
  "devDependencies": {
    "babel-cli": "^6.24.1",
@@ -38,14 +39,18 @@
    "babel-preset-env": "^1.5.2",
    "babel-preset-stage-3": "^6.24.1",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "execa": "^0.10.0",
    "index-modules": "^0.3.0",
    "promise-toolbox": "^0.9.5",
    "rimraf": "^2.6.1",
    "tmp": "^0.0.33"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepare": "yarn run build"
  },
  "babel": {
    "plugins": [
15
packages/vhd-cli/src/commands/check.js
Normal file
@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

export default async args => {
  const handler = getHandler({ url: 'file:///' })
  for (const vhd of args) {
    try {
      await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
      console.log('ok:', vhd)
    } catch (error) {
      console.error('nok:', vhd, error)
    }
  }
}
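A hedged programmatic sketch of the command above, mirroring the integration test further down (it assumes `qemu-img` is installed and the package dependencies are built; the import path is taken relative to src/commands/):

import execa from 'execa'
import check from './check'

async function main () {
  // Create an empty fixed VHD, then verify its header/footer checksums.
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
  await check(['empty.vhd']) // logs "ok: empty.vhd" on success
}

main().catch(console.error)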
12
packages/vhd-cli/src/commands/info.js
Normal file
@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

export default async args => {
  const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))

  await vhd.readHeaderAndFooter()

  console.log(vhd.header)
  console.log(vhd.footer)
}
21
packages/vhd-cli/src/commands/synthetize.js
Normal file
@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'

export default async function main (args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> <output VHD>`
  }

  const handler = getHandler({ url: 'file:///' })
  return new Promise((resolve, reject) => {
    createSyntheticStream(handler, path.resolve(args[0]))
      .on('error', reject)
      .pipe(
        createWriteStream(args[1])
          .on('error', reject)
          .on('finish', resolve)
      )
  })
}
@@ -1,19 +1,44 @@
#!/usr/bin/env node

import execPromise from 'exec-promise'
import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
import { resolve } from 'path'

import Vhd from './vhd'
import commands from './commands'

execPromise(async args => {
  const vhd = new Vhd(
    new RemoteHandlerLocal({ url: 'file:///' }),
    resolve(args[0])
function runCommand (commands, [command, ...args]) {
  if (command === undefined || command === '-h' || command === '--help') {
    command = 'help'
  }

  const fn = commands[command]

  if (fn === undefined) {
    if (command === 'help') {
      return `Usage:

${Object.keys(commands)
  .filter(command => command !== 'help')
  .map(command => ` ${this.command} ${command}`)
  .join('\n\n')}`
    }

    throw `invalid command ${command}` // eslint-disable-line no-throw-literal
  }

  return fn.call(
    {
      __proto__: this,
      command: `${this.command} ${command}`,
    },
    args
  )
}

  await vhd.readHeaderAndFooter()

  console.log(vhd._header)
  console.log(vhd._footer)
})
execPromise(
  runCommand.bind(
    {
      command: 'vhd-cli',
      runCommand,
    },
    commands
  )
)
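For reference, a sketch of how the dispatcher above is meant to be invoked once the package's bin entry points at this file (command names come from src/commands/; the exact output wording is an assumption):

// vhd-cli                                  -> prints the generated usage (the 'help' branch above)
// vhd-cli info ./disk.vhd                  -> commands/info.js: prints the parsed header and footer
// vhd-cli check ./a.vhd ./b.vhd            -> commands/check.js: 'ok:'/'nok:' per file
// vhd-cli synthetize ./delta.vhd ./out.vhd -> commands/synthetize.js: writes a merged, standalone VHD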
28
packages/vhd-cli/src/info.integ.spec.js
Normal file
@@ -0,0 +1,28 @@
/* eslint-env jest */

import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'

import command from './commands/info'

const initialDir = process.cwd()

jest.setTimeout(10000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('can run the command', async () => {
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
  await command(['empty.vhd'])
})
@@ -1,461 +0,0 @@
|
||||
import assert from 'assert'
|
||||
import fu from 'struct-fu'
|
||||
import { dirname } from 'path'
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
// Spec:
|
||||
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
|
||||
//
|
||||
// C implementation:
|
||||
// https://github.com/rubiojr/vhd-util-convert
|
||||
//
|
||||
// ===================================================================
|
||||
|
||||
/* eslint-disable no-unused-vars */
|
||||
|
||||
const HARD_DISK_TYPE_DIFFERENCING = 4
|
||||
const HARD_DISK_TYPE_DYNAMIC = 3
|
||||
const HARD_DISK_TYPE_FIXED = 2
|
||||
const PLATFORM_CODE_NONE = 0
|
||||
export const SECTOR_SIZE = 512
|
||||
|
||||
/* eslint-enable no-unused vars */
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const fuFooter = fu.struct([
|
||||
fu.char('cookie', 8), // 0
|
||||
fu.uint32('features'), // 8
|
||||
fu.uint32('fileFormatVersion'), // 12
|
||||
fu.struct('dataOffset', [
|
||||
fu.uint32('high'), // 16
|
||||
fu.uint32('low'), // 20
|
||||
]),
|
||||
fu.uint32('timestamp'), // 24
|
||||
fu.char('creatorApplication', 4), // 28
|
||||
fu.uint32('creatorVersion'), // 32
|
||||
fu.uint32('creatorHostOs'), // 36
|
||||
fu.struct('originalSize', [
|
||||
// At the creation, current size of the hard disk.
|
||||
fu.uint32('high'), // 40
|
||||
fu.uint32('low'), // 44
|
||||
]),
|
||||
fu.struct('currentSize', [
|
||||
// Current size of the virtual disk. At the creation: currentSize = originalSize.
|
||||
fu.uint32('high'), // 48
|
||||
fu.uint32('low'), // 52
|
||||
]),
|
||||
fu.struct('diskGeometry', [
|
||||
fu.uint16('cylinders'), // 56
|
||||
fu.uint8('heads'), // 58
|
||||
fu.uint8('sectorsPerTrackCylinder'), // 59
|
||||
]),
|
||||
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
|
||||
fu.uint32('checksum'), // 64
|
||||
fu.uint8('uuid', 16), // 68
|
||||
fu.char('saved'), // 84
|
||||
fu.char('hidden'), // 85
|
||||
fu.byte('reserved', 426), // 86
|
||||
])
|
||||
const FOOTER_SIZE = fuFooter.size
|
||||
|
||||
const fuHeader = fu.struct([
|
||||
fu.char('cookie', 8),
|
||||
fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
|
||||
fu.struct('tableOffset', [
|
||||
// Absolute byte offset of the Block Allocation Table.
|
||||
fu.uint32('high'),
|
||||
fu.uint32('low'),
|
||||
]),
|
||||
fu.uint32('headerVersion'),
|
||||
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
|
||||
fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
|
||||
fu.uint32('checksum'),
|
||||
fu.uint8('parentUuid', 16),
|
||||
fu.uint32('parentTimestamp'),
|
||||
fu.byte('reserved1', 4),
|
||||
fu.char16be('parentUnicodeName', 512),
|
||||
fu.struct(
|
||||
'parentLocatorEntry',
|
||||
[
|
||||
fu.uint32('platformCode'),
|
||||
fu.uint32('platformDataSpace'),
|
||||
fu.uint32('platformDataLength'),
|
||||
fu.uint32('reserved'),
|
||||
fu.struct('platformDataOffset', [
|
||||
// Absolute byte offset of the locator data.
|
||||
fu.uint32('high'),
|
||||
fu.uint32('low'),
|
||||
]),
|
||||
],
|
||||
8
|
||||
),
|
||||
fu.byte('reserved2', 256),
|
||||
])
|
||||
const HEADER_SIZE = fuHeader.size
|
||||
|
||||
// ===================================================================
|
||||
// Helpers
|
||||
// ===================================================================
|
||||
|
||||
const SIZE_OF_32_BITS = Math.pow(2, 32)
|
||||
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low
|
||||
|
||||
// Returns a 32 bits integer corresponding to a Vhd version.
|
||||
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
|
||||
|
||||
// bytes[] bit manipulation
|
||||
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
|
||||
const setBit = (map, bit) => {
|
||||
map[bit >> 3] |= 1 << (bit & 7)
|
||||
}
|
||||
const unsetBit = (map, bit) => {
|
||||
map[bit >> 3] &= ~(1 << (bit & 7))
|
||||
}
|
||||
|
||||
const addOffsets = (...offsets) =>
|
||||
offsets.reduce(
|
||||
(a, b) =>
|
||||
b == null
|
||||
? a
|
||||
: typeof b === 'object'
|
||||
? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
|
||||
: { bytes: a.bytes + b, bits: a.bits },
|
||||
{ bytes: 0, bits: 0 }
|
||||
)
|
||||
|
||||
const pack = (field, value, buf, offset) => {
|
||||
field.pack(value, buf, addOffsets(field.offset, offset))
|
||||
}
|
||||
|
||||
const unpack = (field, buf, offset) =>
|
||||
field.unpack(buf, addOffsets(field.offset, offset))
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const streamToNewBuffer = stream =>
|
||||
new Promise((resolve, reject) => {
|
||||
const chunks = []
|
||||
let length = 0
|
||||
|
||||
const onData = chunk => {
|
||||
chunks.push(chunk)
|
||||
length += chunk.length
|
||||
}
|
||||
stream.on('data', onData)
|
||||
|
||||
const clean = () => {
|
||||
stream.removeListener('data', onData)
|
||||
stream.removeListener('end', onEnd)
|
||||
stream.removeListener('error', onError)
|
||||
}
|
||||
const onEnd = () => {
|
||||
resolve(Buffer.concat(chunks, length))
|
||||
clean()
|
||||
}
|
||||
stream.on('end', onEnd)
|
||||
const onError = error => {
|
||||
reject(error)
|
||||
clean()
|
||||
}
|
||||
stream.on('error', onError)
|
||||
})
|
||||
|
||||
const streamToExistingBuffer = (
|
||||
stream,
|
||||
buffer,
|
||||
offset = 0,
|
||||
end = buffer.length
|
||||
) =>
|
||||
new Promise((resolve, reject) => {
|
||||
assert(offset >= 0)
|
||||
assert(end > offset)
|
||||
assert(end <= buffer.length)
|
||||
|
||||
let i = offset
|
||||
|
||||
const onData = chunk => {
|
||||
const prev = i
|
||||
i += chunk.length
|
||||
|
||||
if (i > end) {
|
||||
return onError(new Error('too much data'))
|
||||
}
|
||||
|
||||
chunk.copy(buffer, prev)
|
||||
}
|
||||
stream.on('data', onData)
|
||||
|
||||
const clean = () => {
|
||||
stream.removeListener('data', onData)
|
||||
stream.removeListener('end', onEnd)
|
||||
stream.removeListener('error', onError)
|
||||
}
|
||||
const onEnd = () => {
|
||||
resolve(i - offset)
|
||||
clean()
|
||||
}
|
||||
stream.on('end', onEnd)
|
||||
const onError = error => {
|
||||
reject(error)
|
||||
clean()
|
||||
}
|
||||
stream.on('error', onError)
|
||||
})
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// Returns the checksum of a raw struct.
|
||||
const computeChecksum = (struct, buf, offset = 0) => {
|
||||
let sum = 0
|
||||
|
||||
// Do not use the stored checksum to compute the new checksum.
|
||||
const checksumField = struct.fields.checksum
|
||||
const checksumOffset = offset + checksumField.offset
|
||||
for (let i = offset, n = checksumOffset; i < n; ++i) {
|
||||
sum += buf[i]
|
||||
}
|
||||
for (
|
||||
let i = checksumOffset + checksumField.size, n = offset + struct.size;
|
||||
i < n;
|
||||
++i
|
||||
) {
|
||||
sum += buf[i]
|
||||
}
|
||||
|
||||
return ~sum >>> 0
|
||||
}
|
||||
|
||||
const verifyChecksum = (struct, buf, offset) =>
|
||||
unpack(struct.fields.checksum, buf, offset) ===
|
||||
computeChecksum(struct, buf, offset)
|
||||
|
||||
const getParentLocatorSize = parentLocatorEntry => {
|
||||
const { platformDataSpace } = parentLocatorEntry
|
||||
|
||||
if (platformDataSpace < SECTOR_SIZE) {
|
||||
return platformDataSpace * SECTOR_SIZE
|
||||
}
|
||||
|
||||
return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// Euclidean division, returns the quotient and the remainder of a / b.
|
||||
const div = (a, b) => [Math.floor(a / b), a % b]
|
||||
|
||||
export default class Vhd {
|
||||
constructor (handler, path) {
|
||||
this._handler = handler
|
||||
this._path = path
|
||||
|
||||
this._blockAllocationTable = null
|
||||
this._blockBitmapSize = null
|
||||
this._footer = null
|
||||
this._header = null
|
||||
this._parent = null
|
||||
this._sectorsPerBlock = null
|
||||
}
|
||||
|
||||
// Read `length` bytes starting from `begin`.
|
||||
//
|
||||
// - if `buffer`: it is filled starting from `offset`, and the
|
||||
// number of written bytes is returned;
|
||||
// - otherwise: a new buffer is allocated and returned.
|
||||
_read (begin, length, buf, offset) {
|
||||
assert(begin >= 0)
|
||||
assert(length > 0)
|
||||
|
||||
return this._handler
|
||||
.createReadStream(this._path, {
|
||||
end: begin + length - 1,
|
||||
start: begin,
|
||||
})
|
||||
.then(
|
||||
buf
|
||||
? stream =>
|
||||
streamToExistingBuffer(
|
||||
stream,
|
||||
buf,
|
||||
offset,
|
||||
(offset || 0) + length
|
||||
)
|
||||
: streamToNewBuffer
|
||||
)
|
||||
}
|
||||
|
||||
// - if `buffer`: it is filled with 0 starting from `offset`, and
|
||||
// the number of written bytes is returned;
|
||||
// - otherwise: a new buffer is allocated and returned.
|
||||
_zeroes (length, buf, offset = 0) {
|
||||
if (buf) {
|
||||
assert(offset >= 0)
|
||||
assert(length > 0)
|
||||
|
||||
const end = offset + length
|
||||
assert(end <= buf.length)
|
||||
|
||||
buf.fill(0, offset, end)
|
||||
return Promise.resolve(length)
|
||||
}
|
||||
|
||||
return Promise.resolve(Buffer.alloc(length))
|
||||
}
|
||||
|
||||
// Return the position of a block in the VHD or undefined if not found.
|
||||
_getBlockAddress (block) {
|
||||
assert(block >= 0)
|
||||
assert(block < this._header.maxTableEntries)
|
||||
|
||||
const blockAddr = this._blockAllocationTable[block]
|
||||
if (blockAddr !== 0xffffffff) {
|
||||
return blockAddr * SECTOR_SIZE
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async readHeaderAndFooter () {
|
||||
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
|
||||
|
||||
if (!verifyChecksum(fuFooter, buf)) {
|
||||
throw new Error('footer checksum does not match')
|
||||
}
|
||||
|
||||
if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
|
||||
throw new Error('header checksum does not match')
|
||||
}
|
||||
|
||||
return this._initMetadata(
|
||||
unpack(fuHeader, buf, FOOTER_SIZE),
|
||||
unpack(fuFooter, buf)
|
||||
)
|
||||
}
|
||||
|
||||
async _initMetadata (header, footer) {
|
||||
const sectorsPerBlock = header.blockSize / SECTOR_SIZE
|
||||
assert(sectorsPerBlock % 1 === 0)
|
||||
|
||||
// 1 bit per sector, rounded up to full sectors
|
||||
this._blockBitmapSize =
|
||||
Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
|
||||
assert(this._blockBitmapSize === SECTOR_SIZE)
|
||||
|
||||
this._footer = footer
|
||||
this._header = header
|
||||
this.size = uint32ToUint64(this._footer.currentSize)
|
||||
|
||||
if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
|
||||
const parent = new Vhd(
|
||||
this._handler,
|
||||
`${dirname(this._path)}/${header.parentUnicodeName}`
|
||||
)
|
||||
await parent.readHeaderAndFooter()
|
||||
await parent.readBlockAllocationTable()
|
||||
|
||||
this._parent = parent
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async readBlockAllocationTable () {
|
||||
const { maxTableEntries, tableOffset } = this._header
|
||||
const fuTable = fu.uint32(maxTableEntries)
|
||||
|
||||
this._blockAllocationTable = unpack(
|
||||
fuTable,
|
||||
await this._read(uint32ToUint64(tableOffset), fuTable.size)
|
||||
)
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
// read a single sector in a block
|
||||
async _readBlockSector (block, sector, begin, length, buf, offset) {
|
||||
assert(begin >= 0)
|
||||
assert(length > 0)
|
||||
assert(begin + length <= SECTOR_SIZE)
|
||||
|
||||
const blockAddr = this._getBlockAddress(block)
|
||||
const blockBitmapSize = this._blockBitmapSize
|
||||
const parent = this._parent
|
||||
|
||||
if (
|
||||
blockAddr &&
|
||||
(!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
|
||||
) {
|
||||
return this._read(
|
||||
blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
|
||||
length,
|
||||
buf,
|
||||
offset
|
||||
)
|
||||
}
|
||||
|
||||
return parent
|
||||
? parent._readBlockSector(block, sector, begin, length, buf, offset)
|
||||
: this._zeroes(length, buf, offset)
|
||||
}
|
||||
|
||||
_readBlock (block, begin, length, buf, offset) {
|
||||
assert(begin >= 0)
|
||||
assert(length > 0)
|
||||
|
||||
const { blockSize } = this._header
|
||||
assert(begin + length <= blockSize)
|
||||
|
||||
const blockAddr = this._getBlockAddress(block)
|
||||
const parent = this._parent
|
||||
|
||||
if (!blockAddr) {
|
||||
return parent
|
||||
? parent._readBlock(block, begin, length, buf, offset)
|
||||
: this._zeroes(length, buf, offset)
|
||||
}
|
||||
|
||||
if (!parent) {
|
||||
return this._read(
|
||||
blockAddr + this._blockBitmapSize + begin,
|
||||
length,
|
||||
buf,
|
||||
offset
|
||||
)
|
||||
}
|
||||
|
||||
// FIXME: we should read as many sectors in a single pass as
|
||||
// possible for maximum perf.
|
||||
const [sector, beginInSector] = div(begin, SECTOR_SIZE)
|
||||
return this._readBlockSector(
|
||||
block,
|
||||
sector,
|
||||
beginInSector,
|
||||
Math.min(length, SECTOR_SIZE - beginInSector),
|
||||
buf,
|
||||
offset
|
||||
)
|
||||
}
|
||||
|
||||
read (buf, begin, length = buf.length, offset) {
|
||||
assert(Buffer.isBuffer(buf))
|
||||
assert(begin >= 0)
|
||||
|
||||
const { size } = this
|
||||
if (begin >= size) {
|
||||
return Promise.resolve(0)
|
||||
}
|
||||
|
||||
const { blockSize } = this._header
|
||||
const [block, beginInBlock] = div(begin, blockSize)
|
||||
|
||||
return this._readBlock(
|
||||
block,
|
||||
beginInBlock,
|
||||
Math.min(length, blockSize - beginInBlock, size - begin),
|
||||
buf,
|
||||
offset
|
||||
)
|
||||
}
|
||||
}
|
||||
3
packages/vhd-lib/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
56
packages/vhd-lib/package.json
Normal file
@@ -0,0 +1,56 @@
{
  "name": "vhd-lib",
  "version": "0.0.0",
  "license": "AGPL-3.0",
  "description": "Primitives for VHD file handling",
  "keywords": [],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {},
  "files": [
    "dist/"
  ],
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "@babel/runtime": "^7.0.0-beta.44",
    "@xen-orchestra/fs": "^0.0.0",
    "async-iterator-to-stream": "^1.0.2",
    "execa": "^0.10.0",
    "from2": "^2.3.0",
    "fs-extra": "^5.0.0",
    "get-stream": "^3.0.0",
    "limit-concurrency-decorator": "^0.4.0",
    "promise-toolbox": "^0.9.5",
    "struct-fu": "^1.2.0",
    "uuid": "^3.0.1",
    "tmp": "^0.0.33"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/plugin-transform-runtime": "^7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "fs-promise": "^2.0.0",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
  }
}
7
packages/vhd-lib/src/_bitmap.js
Normal file
@@ -0,0 +1,7 @@
const MASK = 0x80

export const set = (map, bit) => {
  map[bit >> 3] |= MASK >> (bit & 7)
}

export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
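A quick sanity check of the two helpers above (not part of the diff): bits are addressed MSB-first within each byte, one bit per sector.

import { set, test } from './_bitmap'

const bitmap = Buffer.alloc(1)
set(bitmap, 0)
console.log(bitmap[0].toString(2).padStart(8, '0')) // '10000000'
console.log(test(bitmap, 0), test(bitmap, 1)) // true false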
37
packages/vhd-lib/src/_computeGeometryForSize.js
Normal file
@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'

export default function computeGeometryForSize (size) {
  const totalSectors = Math.ceil(size / 512)
  let sectorsPerTrackCylinder
  let heads
  let cylinderTimesHeads
  if (totalSectors > 65535 * 16 * 255) {
    throw Error('disk is too big')
  }
  // straight copypasta from the file spec appendix on CHS Calculation
  if (totalSectors >= 65535 * 16 * 63) {
    sectorsPerTrackCylinder = 255
    heads = 16
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
  } else {
    sectorsPerTrackCylinder = 17
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
    if (heads < 4) {
      heads = 4
    }
    if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
      sectorsPerTrackCylinder = 31
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
    if (cylinderTimesHeads >= heads * 1024) {
      sectorsPerTrackCylinder = 63
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
  }
  const cylinders = Math.ceil(cylinderTimesHeads / heads)
  const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
  return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}
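An illustration of the CHS computation above, with values worked out by hand for a 1 GiB input (treat the numbers as an assumption to verify, not documented output):

import computeGeometryForSize from './_computeGeometryForSize'

console.log(computeGeometryForSize(1024 * 1024 * 1024))
// -> { cylinders: 2081, heads: 16, sectorsPerTrackCylinder: 63, actualSize: 1073995776 }
// i.e. the virtual size is rounded up to a whole number of cylinders (~248 KiB larger here).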
30
packages/vhd-lib/src/_constants.js
Normal file
@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff

// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '

// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec

export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'

export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4

export const PARENT_LOCATOR_ENTRIES = 8

export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
56
packages/vhd-lib/src/_createFooterHeader.js
Normal file
56
packages/vhd-lib/src/_createFooterHeader.js
Normal file
@@ -0,0 +1,56 @@
|
||||
import { v4 as generateUuid } from 'uuid'
|
||||
|
||||
import { checksumStruct, fuFooter, fuHeader } from './_structs'
|
||||
import {
|
||||
CREATOR_APPLICATION,
|
||||
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
|
||||
DISK_TYPE_FIXED,
|
||||
FILE_FORMAT_VERSION,
|
||||
FOOTER_COOKIE,
|
||||
FOOTER_SIZE,
|
||||
HEADER_COOKIE,
|
||||
HEADER_SIZE,
|
||||
HEADER_VERSION,
|
||||
PLATFORM_WI2K,
|
||||
} from './_constants'
|
||||
|
||||
export function createFooter (
|
||||
size,
|
||||
timestamp,
|
||||
geometry,
|
||||
dataOffset,
|
||||
diskType = DISK_TYPE_FIXED
|
||||
) {
|
||||
const footer = fuFooter.pack({
|
||||
cookie: FOOTER_COOKIE,
|
||||
features: 2,
|
||||
fileFormatVersion: FILE_FORMAT_VERSION,
|
||||
dataOffset,
|
||||
timestamp,
|
||||
creatorApplication: CREATOR_APPLICATION,
|
||||
creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
|
||||
originalSize: size,
|
||||
currentSize: size,
|
||||
diskGeometry: geometry,
|
||||
diskType,
|
||||
uuid: generateUuid(null, []),
|
||||
})
|
||||
checksumStruct(footer, fuFooter)
|
||||
return footer
|
||||
}
|
||||
|
||||
export function createHeader (
|
||||
maxTableEntries,
|
||||
tableOffset = HEADER_SIZE + FOOTER_SIZE,
|
||||
blockSize = VHD_BLOCK_SIZE_BYTES
|
||||
) {
|
||||
const header = fuHeader.pack({
|
||||
cookie: HEADER_COOKIE,
|
||||
tableOffset,
|
||||
headerVersion: HEADER_VERSION,
|
||||
maxTableEntries,
|
||||
blockSize,
|
||||
})
|
||||
checksumStruct(header, fuHeader)
|
||||
return header
|
||||
}
|
||||
121
packages/vhd-lib/src/_structs.js
Normal file
121
packages/vhd-lib/src/_structs.js
Normal file
@@ -0,0 +1,121 @@
|
||||
import assert from 'assert'
|
||||
import fu from 'struct-fu'
|
||||
|
||||
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
|
||||
|
||||
const SIZE_OF_32_BITS = Math.pow(2, 32)
|
||||
|
||||
const uint64 = fu.derive(
|
||||
fu.uint32(2),
|
||||
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
|
||||
_ => _[0] * SIZE_OF_32_BITS + _[1]
|
||||
)
|
||||
const uint64Undefinable = fu.derive(
|
||||
fu.uint32(2),
|
||||
number =>
|
||||
number === undefined
|
||||
? [0xffffffff, 0xffffffff]
|
||||
: [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
|
||||
_ =>
|
||||
_[0] === 0xffffffff && _[1] === 0xffffffff
|
||||
? undefined
|
||||
: _[0] * SIZE_OF_32_BITS + _[1]
|
||||
)
|
||||
|
||||
export const fuFooter = fu.struct([
|
||||
fu.char('cookie', 8), // 0
|
||||
fu.uint32('features'), // 8
|
||||
fu.uint32('fileFormatVersion'), // 12
|
||||
uint64Undefinable('dataOffset'), // offset of the header
|
||||
fu.uint32('timestamp'), // 24
|
||||
fu.char('creatorApplication', 4), // 28
|
||||
fu.uint32('creatorVersion'), // 32
|
||||
fu.uint32('creatorHostOs'), // 36
|
||||
uint64('originalSize'),
|
||||
uint64('currentSize'),
|
||||
fu.struct('diskGeometry', [
|
||||
fu.uint16('cylinders'), // 56
|
||||
fu.uint8('heads'), // 58
|
||||
fu.uint8('sectorsPerTrackCylinder'), // 59
|
||||
]),
|
||||
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
|
||||
fu.uint32('checksum'), // 64
|
||||
fu.uint8('uuid', 16), // 68
|
||||
fu.char('saved'), // 84
|
||||
fu.char('hidden'), // 85 TODO: should probably be merged in reserved
|
||||
fu.char('reserved', 426), // 86
|
||||
])
|
||||
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
|
||||
|
||||
export const fuHeader = fu.struct([
|
||||
fu.char('cookie', 8),
|
||||
uint64Undefinable('dataOffset'),
|
||||
uint64('tableOffset'),
|
||||
fu.uint32('headerVersion'),
|
||||
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
|
||||
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
|
||||
fu.uint32('checksum'),
|
||||
fu.uint8('parentUuid', 16),
|
||||
fu.uint32('parentTimestamp'),
|
||||
fu.uint32('reserved1'),
|
||||
fu.char16be('parentUnicodeName', 512),
|
||||
fu.struct(
|
||||
'parentLocatorEntry',
|
||||
[
|
||||
fu.uint32('platformCode'),
|
||||
fu.uint32('platformDataSpace'),
|
||||
fu.uint32('platformDataLength'),
|
||||
fu.uint32('reserved'),
|
||||
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
|
||||
],
|
||||
PARENT_LOCATOR_ENTRIES
|
||||
),
|
||||
fu.char('reserved2', 256),
|
||||
])
|
||||
assert.strictEqual(fuHeader.size, HEADER_SIZE)
|
||||
|
||||
export const packField = (field, value, buf) => {
|
||||
const { offset } = field
|
||||
|
||||
field.pack(
|
||||
value,
|
||||
buf,
|
||||
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
|
||||
)
|
||||
}
|
||||
|
||||
export const unpackField = (field, buf) => {
|
||||
const { offset } = field
|
||||
|
||||
return field.unpack(
|
||||
buf,
|
||||
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
|
||||
)
|
||||
}
|
||||
|
||||
// Returns the checksum of a raw struct.
|
||||
// The raw struct (footer or header) is altered with the new sum.
|
||||
export function checksumStruct (buf, struct) {
|
||||
const checksumField = struct.fields.checksum
|
||||
let sum = 0
|
||||
|
||||
// Do not use the stored checksum to compute the new checksum.
|
||||
const checksumOffset = checksumField.offset
|
||||
for (let i = 0, n = checksumOffset; i < n; ++i) {
|
||||
sum += buf[i]
|
||||
}
|
||||
for (
|
||||
let i = checksumOffset + checksumField.size, n = struct.size;
|
||||
i < n;
|
||||
++i
|
||||
) {
|
||||
sum += buf[i]
|
||||
}
|
||||
|
||||
sum = ~sum >>> 0
|
||||
|
||||
// Write new sum.
|
||||
packField(checksumField, sum, buf)
|
||||
|
||||
return sum
|
||||
}
|
||||
37
packages/vhd-lib/src/chain.js
Normal file
@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'

export default async function chain (
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = DISK_TYPE_DIFFERENCING
  }

  await Promise.all([
    childVhd.readBlockAllocationTable(),
    parentVhd.readHeaderAndFooter(),
  ])

  const parentName = relative(dirname(childPath), parentPath)
  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName
  await childVhd.setUniqueParentLocator(parentName)
  await childVhd.writeHeader()
  await childVhd.writeFooter()
}
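A hypothetical call of the function above (exported as `chainVhd` from the package index), re-parenting a differencing VHD onto its parent through a local handler; the paths are invented:

import { getHandler } from '@xen-orchestra/fs'
import chainVhd from './chain'

const handler = getHandler({ url: 'file:///' })

// After this, delta.vhd references full.vhd (by relative name) as its parent.
chainVhd(handler, '/backups/full.vhd', handler, '/backups/delta.vhd').catch(
  console.error
)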
42
packages/vhd-lib/src/createReadableRawStream.js
Normal file
@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'

export default asyncIteratorToStream(async function * (size, blockParser) {
  const geometry = computeGeometryForSize(size)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry
  )
  let position = 0

  function * filePadding (paddingLength) {
    if (paddingLength > 0) {
      const chunkSize = 1024 * 1024 // 1 MiB
      for (
        let paddingPosition = 0;
        paddingPosition + chunkSize < paddingLength;
        paddingPosition += chunkSize
      ) {
        yield Buffer.alloc(chunkSize)
      }
      yield Buffer.alloc(paddingLength % chunkSize)
    }
  }

  let next
  while ((next = await blockParser.next()) !== null) {
    const paddingLength = next.offsetBytes - position
    if (paddingLength < 0) {
      throw new Error('Received out of order blocks')
    }
    yield * filePadding(paddingLength)
    yield next.data
    position = next.offsetBytes + next.data.length
  }
  yield * filePadding(actualSize - position)
  yield footer
})
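A sketch of how the generator above could be driven. The `blockParser` shape, `next()` resolving to `{ offsetBytes, data }` or `null`, is inferred from the loop above, and the output file name is invented:

import { createWriteStream } from 'fs'
import createReadableRawStream from './createReadableRawStream'

// Fake parser yielding a single 1 MiB block of 0xff at offset 0.
const blocks = [{ offsetBytes: 0, data: Buffer.alloc(1024 * 1024, 0xff) }]
const parser = { next: async () => (blocks.length > 0 ? blocks.shift() : null) }

// 10 MiB virtual disk: the stream pads the gaps with zeros and appends the footer.
createReadableRawStream(10 * 1024 * 1024, parser).pipe(createWriteStream('raw.img'))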
143
packages/vhd-lib/src/createReadableSparseStream.js
Normal file
143
packages/vhd-lib/src/createReadableSparseStream.js
Normal file
@@ -0,0 +1,143 @@
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
|
||||
import computeGeometryForSize from './_computeGeometryForSize'
|
||||
import { createFooter, createHeader } from './_createFooterHeader'
|
||||
import {
|
||||
BLOCK_UNUSED,
|
||||
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FOOTER_SIZE,
|
||||
HEADER_SIZE,
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
import { set as setBitmap } from './_bitmap'
|
||||
|
||||
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
|
||||
|
||||
/**
|
||||
* @returns {Array} an array of occupation bitmap, each bit mapping an input block size of bytes
|
||||
*/
|
||||
function createBAT (
|
||||
firstBlockPosition,
|
||||
blockAddressList,
|
||||
ratio,
|
||||
bat,
|
||||
bitmapSize
|
||||
) {
|
||||
const vhdOccupationTable = []
|
||||
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
|
||||
blockAddressList.forEach(blockPosition => {
|
||||
const scaled = blockPosition / VHD_BLOCK_SIZE_BYTES
|
||||
const vhdTableIndex = Math.floor(scaled)
|
||||
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
|
||||
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
|
||||
currentVhdPositionSector +=
|
||||
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
|
||||
}
|
||||
// not using bit operators to avoid the int32 coercion, that way we can go to 53 bits
|
||||
vhdOccupationTable[vhdTableIndex] =
|
||||
(vhdOccupationTable[vhdTableIndex] || 0) +
|
||||
Math.pow(2, (scaled % 1) * ratio)
|
||||
})
|
||||
return vhdOccupationTable
|
||||
}
|
||||
|
||||
function createBitmap (bitmapSize, ratio, vhdOccupationBucket) {
|
||||
const bitmap = Buffer.alloc(bitmapSize)
|
||||
for (let i = 0; i < VHD_BLOCK_SIZE_SECTORS / ratio; i++) {
|
||||
// do not shift to avoid int32 coercion
|
||||
if ((vhdOccupationBucket * Math.pow(2, -i)) & 1) {
|
||||
for (let j = 0; j < ratio; j++) {
|
||||
setBitmap(bitmap, i * ratio + j)
|
||||
}
|
||||
}
|
||||
}
|
||||
return bitmap
|
||||
}
|
||||
|
||||
function * yieldIfNotEmpty (buffer) {
|
||||
if (buffer.length > 0) {
|
||||
yield buffer
|
||||
}
|
||||
}
|
||||
|
||||
async function * generateFileContent (
|
||||
blockIterator,
|
||||
bitmapSize,
|
||||
ratio,
|
||||
vhdOccupationTable
|
||||
) {
|
||||
let currentVhdBlockIndex = -1
|
||||
let currentBlockBuffer = Buffer.alloc(0)
|
||||
for await (const next of blockIterator) {
|
||||
const batEntry = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
|
||||
if (batEntry !== currentVhdBlockIndex) {
|
||||
yield * yieldIfNotEmpty(currentBlockBuffer)
|
||||
currentBlockBuffer = Buffer.alloc(VHD_BLOCK_SIZE_BYTES)
|
||||
currentVhdBlockIndex = batEntry
|
||||
yield createBitmap(bitmapSize, ratio, vhdOccupationTable[batEntry])
|
||||
}
|
||||
next.data.copy(currentBlockBuffer, next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
|
||||
}
|
||||
yield * yieldIfNotEmpty(currentBlockBuffer)
|
||||
}
|
||||
|
||||
export default asyncIteratorToStream(async function * (
|
||||
diskSize,
|
||||
incomingBlockSize,
|
||||
blockAddressList,
|
||||
blockIterator
|
||||
) {
|
||||
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
|
||||
if (ratio % 1 !== 0) {
|
||||
throw new Error(
|
||||
`Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
|
||||
)
|
||||
}
|
||||
if (ratio > 53) {
|
||||
throw new Error(
|
||||
`Can't import file, grain size / block size ratio is > 53 (${ratio})`
|
||||
)
|
||||
}
|
||||
|
||||
const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
|
||||
const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512
|
||||
|
||||
const batPosition = FOOTER_SIZE + HEADER_SIZE
|
||||
const firstBlockPosition = batPosition + tablePhysicalSizeBytes
|
||||
const geometry = computeGeometryForSize(diskSize)
|
||||
const actualSize = geometry.actualSize
|
||||
const footer = createFooter(
|
||||
actualSize,
|
||||
Math.floor(Date.now() / 1000),
|
||||
geometry,
|
||||
FOOTER_SIZE,
|
||||
DISK_TYPE_DYNAMIC
|
||||
)
|
||||
const header = createHeader(
|
||||
maxTableEntries,
|
||||
batPosition,
|
||||
VHD_BLOCK_SIZE_BYTES
|
||||
)
|
||||
const bitmapSize =
|
||||
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
|
||||
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
|
||||
const vhdOccupationTable = createBAT(
|
||||
firstBlockPosition,
|
||||
blockAddressList,
|
||||
ratio,
|
||||
bat,
|
||||
bitmapSize
|
||||
)
|
||||
yield footer
|
||||
yield header
|
||||
yield bat
|
||||
yield * generateFileContent(
|
||||
blockIterator,
|
||||
bitmapSize,
|
||||
ratio,
|
||||
vhdOccupationTable
|
||||
)
|
||||
yield footer
|
||||
})
|
||||
153
packages/vhd-lib/src/createSyntheticStream.js
Normal file
153
packages/vhd-lib/src/createSyntheticStream.js
Normal file
@@ -0,0 +1,153 @@
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import { dirname, resolve } from 'path'
|
||||
|
||||
import Vhd from './vhd'
|
||||
import {
|
||||
BLOCK_UNUSED,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FOOTER_SIZE,
|
||||
HEADER_SIZE,
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
import { fuFooter, fuHeader, checksumStruct } from './_structs'
|
||||
import { test as mapTestBit } from './_bitmap'
|
||||
|
||||
const resolveRelativeFromFile = (file, path) =>
|
||||
resolve('/', dirname(file), path).slice(1)
|
||||
|
||||
export default asyncIteratorToStream(function * (handler, path) {
|
||||
const fds = []
|
||||
|
||||
try {
|
||||
const vhds = []
|
||||
while (true) {
|
||||
const fd = yield handler.openFile(path, 'r')
|
||||
fds.push(fd)
|
||||
const vhd = new Vhd(handler, fd)
|
||||
vhds.push(vhd)
|
||||
yield vhd.readHeaderAndFooter()
|
||||
yield vhd.readBlockAllocationTable()
|
||||
|
||||
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
|
||||
break
|
||||
}
|
||||
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
const nVhds = vhds.length
|
||||
|
||||
// this the VHD we want to synthetize
|
||||
const vhd = vhds[0]
|
||||
|
||||
// this is the root VHD
|
||||
const rootVhd = vhds[nVhds - 1]
|
||||
|
||||
// data of our synthetic VHD
|
||||
// TODO: set parentLocatorEntry-s in header
|
||||
let header = {
|
||||
...vhd.header,
|
||||
tableOffset: FOOTER_SIZE + HEADER_SIZE,
|
||||
parentTimestamp: rootVhd.header.parentTimestamp,
|
||||
parentUnicodeName: rootVhd.header.parentUnicodeName,
|
||||
parentUuid: rootVhd.header.parentUuid,
|
||||
}
|
||||
|
||||
const bat = Buffer.allocUnsafe(vhd.batSize)
|
||||
let footer = {
|
||||
...vhd.footer,
|
||||
dataOffset: FOOTER_SIZE,
|
||||
diskType: rootVhd.footer.diskType,
|
||||
}
|
||||
const sectorsPerBlockData = vhd.sectorsPerBlock
|
||||
const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE
|
||||
|
||||
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
|
||||
|
||||
const blocksOwner = new Array(nBlocks)
|
||||
for (
|
||||
let iBlock = 0,
|
||||
blockOffset = Math.ceil(
|
||||
(header.tableOffset + bat.length) / SECTOR_SIZE
|
||||
);
|
||||
iBlock < nBlocks;
|
||||
++iBlock
|
||||
) {
|
||||
let blockSector = BLOCK_UNUSED
|
||||
for (let i = 0; i < nVhds; ++i) {
|
||||
if (vhds[i].containsBlock(iBlock)) {
|
||||
blocksOwner[iBlock] = i
|
||||
blockSector = blockOffset
|
||||
blockOffset += sectorsPerBlock
|
||||
break
|
||||
}
|
||||
}
|
||||
bat.writeUInt32BE(blockSector, iBlock * 4)
|
||||
}
|
||||
|
||||
footer = fuFooter.pack(footer)
|
||||
checksumStruct(footer, fuFooter)
|
||||
yield footer
|
||||
|
||||
header = fuHeader.pack(header)
|
||||
checksumStruct(header, fuHeader)
|
||||
yield header
|
||||
|
||||
yield bat
|
||||
|
||||
// TODO: for generic usage the bitmap needs to be properly computed for each block
|
||||
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
|
||||
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
|
||||
const owner = blocksOwner[iBlock]
|
||||
if (owner === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield bitmap
|
||||
|
||||
const blocksByVhd = new Map()
|
||||
const emitBlockSectors = function * (iVhd, i, n) {
|
||||
const vhd = vhds[iVhd]
|
||||
const isRootVhd = vhd === rootVhd
|
||||
if (!vhd.containsBlock(iBlock)) {
|
||||
if (isRootVhd) {
|
||||
yield Buffer.alloc((n - i) * SECTOR_SIZE)
|
||||
} else {
|
||||
yield * emitBlockSectors(iVhd + 1, i, n)
|
||||
}
|
||||
return
|
||||
}
|
||||
let block = blocksByVhd.get(vhd)
|
||||
if (block === undefined) {
|
||||
block = yield vhd._readBlock(iBlock)
|
||||
blocksByVhd.set(vhd, block)
|
||||
}
|
||||
const { bitmap, data } = block
|
||||
if (isRootVhd) {
|
||||
yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
|
||||
return
|
||||
}
|
||||
while (i < n) {
|
||||
const hasData = mapTestBit(bitmap, i)
|
||||
const start = i
|
||||
do {
|
||||
++i
|
||||
} while (i < n && mapTestBit(bitmap, i) === hasData)
|
||||
if (hasData) {
|
||||
yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
|
||||
} else {
|
||||
yield * emitBlockSectors(iVhd + 1, start, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
|
||||
}
|
||||
|
||||
yield footer
|
||||
} finally {
|
||||
for (let i = 0, n = fds.length; i < n; ++i) {
|
||||
handler.closeFile(fds[i]).catch(error => {
|
||||
console.warn('createReadStream, closeFd', i, error)
|
||||
})
|
||||
}
|
||||
}
|
||||
})
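A hedged usage sketch for the new module: the handler creation and the package name `vhd-lib` are assumed from the updated tests, and the directory and file names are illustrative.

```js
import { createWriteStream } from 'fs'
import { fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { createSyntheticStream } from 'vhd-lib'

async function exportFullVhd (dir, leafVhdPath, outputPath) {
  const handler = getHandler({ url: 'file://' + dir })
  // walks the parent chain up to the dynamic root and emits one
  // self-contained VHD: footer, header, BAT, blocks, trailing footer
  const stream = createSyntheticStream(handler, leafVhdPath)
  await fromEvent(stream.pipe(createWriteStream(outputPath)), 'finish')
}
```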
|
||||
8
packages/vhd-lib/src/index.js
Normal file
@@ -0,0 +1,8 @@
|
||||
export { default } from './vhd'
|
||||
export { default as chainVhd } from './chain'
|
||||
export { default as createReadableRawStream } from './createReadableRawStream'
|
||||
export {
|
||||
default as createReadableSparseStream,
|
||||
} from './createReadableSparseStream'
|
||||
export { default as createSyntheticStream } from './createSyntheticStream'
|
||||
export { default as mergeVhd } from './merge'
|
||||
@@ -2,25 +2,25 @@
|
||||
|
||||
import execa from 'execa'
|
||||
import fs from 'fs-extra'
|
||||
import getStream from 'get-stream'
|
||||
import rimraf from 'rimraf'
|
||||
import tmp from 'tmp'
|
||||
import { getHandler } from '@xen-orchestra/fs'
|
||||
import { randomBytes } from 'crypto'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'
|
||||
|
||||
import LocalHandler from './remote-handlers/local'
|
||||
import vhdMerge, {
|
||||
chainVhd,
|
||||
createReadStream,
|
||||
Vhd,
|
||||
VHD_SECTOR_SIZE,
|
||||
} from './vhd-merge'
|
||||
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
|
||||
import chainVhd from './chain'
|
||||
import createReadStream from './createSyntheticStream'
|
||||
import Vhd from './vhd'
|
||||
import vhdMerge from './merge'
|
||||
import { SECTOR_SIZE } from './_constants'
|
||||
|
||||
const initialDir = process.cwd()
|
||||
|
||||
jest.setTimeout(10000)
|
||||
jest.setTimeout(60000)
|
||||
|
||||
beforeEach(async () => {
|
||||
const dir = await tmpDir()
|
||||
const dir = await pFromCallback(cb => tmp.dir(cb))
|
||||
process.chdir(dir)
|
||||
})
|
||||
|
||||
@@ -57,11 +57,11 @@ test('blocks can be moved', async () => {
|
||||
const initalSize = 4
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'randomfile.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd._freeFirstBlockSpace(8000000)
|
||||
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(
|
||||
@@ -70,20 +70,18 @@ test('blocks can be moved', async () => {
|
||||
})
|
||||
|
||||
test('the BAT MSB is not used for sign', async () => {
|
||||
const randomBuffer = await pFromCallback(cb =>
|
||||
randomBytes(VHD_SECTOR_SIZE, cb)
|
||||
)
|
||||
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const vhd = new Vhd(handler, 'empty.vhd')
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockTable()
|
||||
await vhd.readBlockAllocationTable()
|
||||
// we want the bit 31 to be on, to prove it's not been used for sign
|
||||
const hugeWritePositionSectors = Math.pow(2, 31) + 200
|
||||
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
|
||||
await checkFile('empty.vhd')
|
||||
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
|
||||
const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
|
||||
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
|
||||
await vhd._freeFirstBlockSpace(hugePositionBytes)
|
||||
|
||||
// we recover the data manually for speed reasons.
|
||||
@@ -93,7 +91,7 @@ test('the BAT MSB is not used for sign', async () => {
|
||||
try {
|
||||
const vhd2 = new Vhd(handler, 'empty.vhd')
|
||||
await vhd2.readHeaderAndFooter()
|
||||
await vhd2.readBlockTable()
|
||||
await vhd2.readBlockAllocationTable()
|
||||
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
|
||||
const entry = vhd._getBatEntry(i)
|
||||
if (entry !== 0xffffffff) {
|
||||
@@ -110,7 +108,7 @@ test('the BAT MSB is not used for sign', async () => {
|
||||
} finally {
|
||||
fs.close(recoveredFile)
|
||||
}
|
||||
const recovered = await streamToBuffer(
|
||||
const recovered = await getStream.buffer(
|
||||
await fs.createReadStream('recovered', {
|
||||
start: hugePositionBytes,
|
||||
end: hugePositionBytes + randomBuffer.length - 1,
|
||||
@@ -124,11 +122,11 @@ test('writeData on empty file', async () => {
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd.writeData(0, randomData)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
@@ -139,11 +137,11 @@ test('writeData in 2 non-overlaping operations', async () => {
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
const splitPointSectors = 2
|
||||
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
|
||||
await newVhd.writeData(
|
||||
@@ -159,11 +157,11 @@ test('writeData in 2 overlaping operations', async () => {
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
const endFirstWrite = 3
|
||||
const startSecondWrite = 2
|
||||
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
|
||||
@@ -179,11 +177,11 @@ test('BAT can be extended and blocks moved', async () => {
|
||||
const initalSize = 4
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'randomfile.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd.ensureBatSize(2000)
|
||||
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(
|
||||
@@ -203,7 +201,7 @@ test('coalesce works with empty parent files', async () => {
|
||||
])
|
||||
await checkFile('randomfile.vhd')
|
||||
await checkFile('empty.vhd')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler._getSize('randomfile')
|
||||
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
|
||||
await checkFile('randomfile.vhd')
|
||||
@@ -226,11 +224,11 @@ test('coalesce works in normal cases', async () => {
|
||||
mbOfRandom + 1 + 'M',
|
||||
])
|
||||
await convertFromRawToVhd('randomfile', 'child1.vhd')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
|
||||
const vhd = new Vhd(handler, 'child2.vhd')
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockTable()
|
||||
await vhd.readBlockAllocationTable()
|
||||
vhd.footer.creatorApplication = 'xoa'
|
||||
await vhd.writeFooter()
|
||||
|
||||
@@ -242,7 +240,7 @@ test('coalesce works in normal cases', async () => {
|
||||
const smallRandom = await fs.readFile('small_randomfile')
|
||||
const newVhd = new Vhd(handler, 'child2.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd.writeData(5, smallRandom)
|
||||
await checkFile('child2.vhd')
|
||||
await checkFile('child1.vhd')
|
||||
@@ -261,7 +259,7 @@ test('coalesce works in normal cases', async () => {
|
||||
await execa('cp', ['randomfile', 'randomfile2'])
|
||||
const fd = await fs.open('randomfile2', 'r+')
|
||||
try {
|
||||
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
|
||||
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
|
||||
} finally {
|
||||
await fs.close(fd)
|
||||
}
|
||||
@@ -270,15 +268,16 @@ test('coalesce works in normal cases', async () => {
|
||||
)
|
||||
})
|
||||
|
||||
test('createReadStream passes vhd-util check', async () => {
|
||||
test('createSyntheticStream passes vhd-util check', async () => {
|
||||
const initalSize = 4
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const stream = createReadStream(handler, 'randomfile.vhd')
|
||||
await fromEvent(
|
||||
stream.pipe(await fs.createWriteStream('recovered.vhd')),
|
||||
'finish'
|
||||
)
|
||||
await checkFile('recovered.vhd')
|
||||
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
|
||||
})
|
||||
77
packages/vhd-lib/src/merge.js
Normal file
@@ -0,0 +1,77 @@
|
||||
// TODO: remove once completely merged in vhd.js
|
||||
|
||||
import assert from 'assert'
|
||||
import concurrency from 'limit-concurrency-decorator'
|
||||
|
||||
import Vhd from './vhd'
|
||||
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
|
||||
|
||||
// Merge vhd child into vhd parent.
|
||||
export default concurrency(2)(async function merge (
|
||||
parentHandler,
|
||||
parentPath,
|
||||
childHandler,
|
||||
childPath
|
||||
) {
|
||||
const parentFd = await parentHandler.openFile(parentPath, 'r+')
|
||||
try {
|
||||
const parentVhd = new Vhd(parentHandler, parentFd)
|
||||
const childFd = await childHandler.openFile(childPath, 'r')
|
||||
try {
|
||||
const childVhd = new Vhd(childHandler, childFd)
|
||||
|
||||
// Reading footer and header.
|
||||
await Promise.all([
|
||||
parentVhd.readHeaderAndFooter(),
|
||||
childVhd.readHeaderAndFooter(),
|
||||
])
|
||||
|
||||
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
|
||||
|
||||
const parentDiskType = parentVhd.footer.diskType
|
||||
assert(
|
||||
parentDiskType === DISK_TYPE_DIFFERENCING ||
|
||||
parentDiskType === DISK_TYPE_DYNAMIC
|
||||
)
|
||||
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
|
||||
|
||||
// Read allocation table of child/parent.
|
||||
await Promise.all([
|
||||
parentVhd.readBlockAllocationTable(),
|
||||
childVhd.readBlockAllocationTable(),
|
||||
])
|
||||
|
||||
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
|
||||
|
||||
let mergedDataSize = 0
|
||||
for (
|
||||
let blockId = 0;
|
||||
blockId < childVhd.header.maxTableEntries;
|
||||
blockId++
|
||||
) {
|
||||
if (childVhd.containsBlock(blockId)) {
|
||||
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
|
||||
}
|
||||
}
|
||||
|
||||
const cFooter = childVhd.footer
|
||||
const pFooter = parentVhd.footer
|
||||
|
||||
pFooter.currentSize = cFooter.currentSize
|
||||
pFooter.diskGeometry = { ...cFooter.diskGeometry }
|
||||
pFooter.originalSize = cFooter.originalSize
|
||||
pFooter.timestamp = cFooter.timestamp
|
||||
pFooter.uuid = cFooter.uuid
|
||||
|
||||
// necessary to update values and to recreate the footer after block
|
||||
// creation
|
||||
await parentVhd.writeFooter()
|
||||
|
||||
return mergedDataSize
|
||||
} finally {
|
||||
await childHandler.closeFile(childFd)
|
||||
}
|
||||
} finally {
|
||||
await parentHandler.closeFile(parentFd)
|
||||
}
|
||||
})
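As a usage note, the merge takes (parentHandler, parentPath, childHandler, childPath) and resolves with the number of bytes coalesced into the parent. A small sketch, assuming a local handler as in the tests; paths are illustrative.

```js
import { getHandler } from '@xen-orchestra/fs'
import { mergeVhd } from 'vhd-lib'

async function coalesceChild () {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  // child must be a differencing disk, parent a dynamic or differencing disk
  const mergedDataSize = await mergeVhd(handler, 'parent.vhd', handler, 'child.vhd')
  console.log(`merged ${mergedDataSize} bytes into parent.vhd`)
}
```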
|
||||
134
packages/vhd-lib/src/vhd.integ.spec.js
Normal file
@@ -0,0 +1,134 @@
|
||||
/* eslint-env jest */
|
||||
import execa from 'execa'
|
||||
import rimraf from 'rimraf'
|
||||
import tmp from 'tmp'
|
||||
import { createWriteStream, readFile } from 'fs-promise'
|
||||
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'
|
||||
|
||||
import { createFooter } from './_createFooterHeader'
|
||||
import createReadableRawVHDStream from './createReadableRawStream'
|
||||
import createReadableSparseVHDStream from './createReadableSparseStream'
|
||||
|
||||
const initialDir = process.cwd()
|
||||
|
||||
beforeEach(async () => {
|
||||
const dir = await pFromCallback(cb => tmp.dir(cb))
|
||||
process.chdir(dir)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
const tmpDir = process.cwd()
|
||||
process.chdir(initialDir)
|
||||
await pFromCallback(cb => rimraf(tmpDir, cb))
|
||||
})
|
||||
|
||||
test('createFooter() does not crash', () => {
|
||||
createFooter(104448, Math.floor(Date.now() / 1000), {
|
||||
cylinders: 3,
|
||||
heads: 4,
|
||||
sectorsPerTrack: 17,
|
||||
})
|
||||
})
|
||||
|
||||
test('ReadableRawVHDStream does not crash', async () => {
|
||||
const data = [
|
||||
{
|
||||
offsetBytes: 100,
|
||||
data: Buffer.from('azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
offsetBytes: 700,
|
||||
data: Buffer.from('gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
let index = 0
|
||||
const mockParser = {
|
||||
next: () => {
|
||||
if (index < data.length) {
|
||||
const result = data[index]
|
||||
index++
|
||||
return result
|
||||
} else {
|
||||
return null
|
||||
}
|
||||
},
|
||||
}
|
||||
const fileSize = 1000
|
||||
const stream = createReadableRawVHDStream(fileSize, mockParser)
|
||||
const pipe = stream.pipe(createWriteStream('output.vhd'))
|
||||
await fromEvent(pipe, 'finish')
|
||||
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
|
||||
})
|
||||
|
||||
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
|
||||
const data = [
|
||||
{
|
||||
offsetBytes: 700,
|
||||
data: Buffer.from('azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
offsetBytes: 100,
|
||||
data: Buffer.from('gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
let index = 0
|
||||
const mockParser = {
|
||||
next: () => {
|
||||
if (index < data.length) {
|
||||
const result = data[index]
|
||||
index++
|
||||
return result
|
||||
} else {
|
||||
return null
|
||||
}
|
||||
},
|
||||
}
|
||||
return expect(
|
||||
new Promise((resolve, reject) => {
|
||||
const stream = createReadableRawVHDStream(100000, mockParser)
|
||||
stream.on('error', reject)
|
||||
const pipe = stream.pipe(createWriteStream('outputStream'))
|
||||
pipe.on('finish', resolve)
|
||||
pipe.on('error', reject)
|
||||
})
|
||||
).rejects.toThrow('Received out of order blocks')
|
||||
})
|
||||
|
||||
test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const blockSize = Math.pow(2, 16)
|
||||
const blocks = [
|
||||
{
|
||||
offsetBytes: blockSize * 3,
|
||||
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
offsetBytes: blockSize * 5,
|
||||
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
const fileSize = blockSize * 10
|
||||
const stream = createReadableSparseVHDStream(
|
||||
fileSize,
|
||||
blockSize,
|
||||
[100, 700],
|
||||
blocks
|
||||
)
|
||||
const pipe = stream.pipe(createWriteStream('output.vhd'))
|
||||
await fromEvent(pipe, 'finish')
|
||||
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
|
||||
await execa('qemu-img', [
|
||||
'convert',
|
||||
'-f',
|
||||
'vpc',
|
||||
'-O',
|
||||
'raw',
|
||||
'output.vhd',
|
||||
'out1.raw',
|
||||
])
|
||||
const out1 = await readFile('out1.raw')
|
||||
const expected = Buffer.alloc(fileSize)
|
||||
blocks.forEach(b => {
|
||||
b.data.copy(expected, b.offsetBytes)
|
||||
})
|
||||
await expect(out1.slice(0, expected.length)).toEqual(expected)
|
||||
})
|
||||
@@ -1,18 +1,30 @@
|
||||
// TODO: remove once completely merged in vhd.js
|
||||
|
||||
import assert from 'assert'
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import concurrency from 'limit-concurrency-decorator'
|
||||
import fu from 'struct-fu'
|
||||
import { dirname, relative } from 'path'
|
||||
import getStream from 'get-stream'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import type RemoteHandler from './remote-handlers/abstract'
|
||||
import constantStream from './constant-stream'
|
||||
import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'
|
||||
import constantStream from './_constant-stream'
|
||||
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
|
||||
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
|
||||
import {
|
||||
BLOCK_UNUSED,
|
||||
DISK_TYPE_DIFFERENCING,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FILE_FORMAT_VERSION,
|
||||
FOOTER_COOKIE,
|
||||
FOOTER_SIZE,
|
||||
HEADER_COOKIE,
|
||||
HEADER_SIZE,
|
||||
HEADER_VERSION,
|
||||
PARENT_LOCATOR_ENTRIES,
|
||||
PLATFORM_NONE,
|
||||
PLATFORM_W2KU,
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
const VHD_UTIL_DEBUG = 0
|
||||
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
|
||||
const debug = VHD_UTIL_DEBUG
|
||||
? str => console.log(`[vhd-merge]${str}`)
|
||||
: () => null
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
@@ -24,160 +36,12 @@ const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
|
||||
//
|
||||
// ===================================================================
|
||||
|
||||
// Sizes in bytes.
|
||||
const VHD_FOOTER_SIZE = 512
|
||||
const VHD_HEADER_SIZE = 1024
|
||||
export const VHD_SECTOR_SIZE = 512
|
||||
|
||||
// Block allocation table entry size. (Block addr)
|
||||
const VHD_ENTRY_SIZE = 4
|
||||
|
||||
const VHD_PARENT_LOCATOR_ENTRIES = 8
|
||||
const VHD_PLATFORM_CODE_NONE = 0
|
||||
|
||||
// Types of backup treated. Others are not supported.
|
||||
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
|
||||
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
|
||||
|
||||
export const PLATFORM_NONE = 0
|
||||
export const PLATFORM_W2RU = 0x57327275
|
||||
export const PLATFORM_W2KU = 0x57326b75
|
||||
export const PLATFORM_MAC = 0x4d616320
|
||||
export const PLATFORM_MACX = 0x4d616358
|
||||
|
||||
// Other.
|
||||
const BLOCK_UNUSED = 0xffffffff
|
||||
const BIT_MASK = 0x80
|
||||
|
||||
// unused block as buffer containing a uint32BE
|
||||
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
|
||||
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const SIZE_OF_32_BITS = Math.pow(2, 32)
|
||||
const uint64 = fu.derive(
|
||||
fu.uint32(2),
|
||||
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
|
||||
_ => _[0] * SIZE_OF_32_BITS + _[1]
|
||||
)
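struct-fu has no native 64-bit integer, so this derived field stores a JavaScript number as two big-endian uint32 halves. A quick worked example of the round trip, using 5 GiB as the value:

```js
const SIZE_OF_32_BITS = Math.pow(2, 32)
const n = 5 * 1024 * 1024 * 1024 // 5368709120
// pack: high half, then low half
const packed = [Math.floor(n / SIZE_OF_32_BITS), n % SIZE_OF_32_BITS] // [1, 1073741824]
// unpack: recombine the halves
const unpacked = packed[0] * SIZE_OF_32_BITS + packed[1] // 5368709120 again
```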
|
||||
|
||||
const fuFooter = fu.struct([
|
||||
fu.char('cookie', 8), // 0
|
||||
fu.uint32('features'), // 8
|
||||
fu.uint32('fileFormatVersion'), // 12
|
||||
uint64('dataOffset'), // offset of the header, should always be 512
|
||||
fu.uint32('timestamp'), // 24
|
||||
fu.char('creatorApplication', 4), // 28
|
||||
fu.uint32('creatorVersion'), // 32
|
||||
fu.uint32('creatorHostOs'), // 36
|
||||
uint64('originalSize'),
|
||||
uint64('currentSize'),
|
||||
fu.struct('diskGeometry', [
|
||||
fu.uint16('cylinders'), // 56
|
||||
fu.uint8('heads'), // 58
|
||||
fu.uint8('sectorsPerTrackCylinder'), // 59
|
||||
]),
|
||||
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
|
||||
fu.uint32('checksum'), // 64
|
||||
fu.uint8('uuid', 16), // 68
|
||||
fu.char('saved'), // 84
|
||||
fu.char('hidden'), // 85
|
||||
fu.char('reserved', 426), // 86
|
||||
])
|
||||
|
||||
const fuHeader = fu.struct([
|
||||
fu.char('cookie', 8),
|
||||
fu.uint8('dataOffsetUnused', 8),
|
||||
uint64('tableOffset'),
|
||||
fu.uint32('headerVersion'),
|
||||
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
|
||||
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
|
||||
fu.uint32('checksum'),
|
||||
fu.uint8('parentUuid', 16),
|
||||
fu.uint32('parentTimestamp'),
|
||||
fu.uint32('reserved1'),
|
||||
fu.char16be('parentUnicodeName', 512),
|
||||
fu.struct(
|
||||
'parentLocatorEntry',
|
||||
[
|
||||
fu.uint32('platformCode'),
|
||||
fu.uint32('platformDataSpace'),
|
||||
fu.uint32('platformDataLength'),
|
||||
fu.uint32('reserved'),
|
||||
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
|
||||
],
|
||||
VHD_PARENT_LOCATOR_ENTRIES
|
||||
),
|
||||
fu.char('reserved2', 256),
|
||||
])
|
||||
|
||||
// ===================================================================
|
||||
// Helpers
|
||||
// ===================================================================
|
||||
|
||||
const computeBatSize = entries =>
|
||||
sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))
|
||||
|
||||
// Returns a 32 bits integer corresponding to a Vhd version.
|
||||
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
|
||||
sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
|
||||
|
||||
// Sectors conversions.
|
||||
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
|
||||
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
|
||||
|
||||
// Check/Set a bit on a vhd map.
|
||||
const mapTestBit = (map, bit) => ((map[bit >> 3] << (bit & 7)) & BIT_MASK) !== 0
|
||||
const mapSetBit = (map, bit) => {
|
||||
map[bit >> 3] |= BIT_MASK >> (bit & 7)
|
||||
}
|
||||
|
||||
const packField = (field, value, buf) => {
|
||||
const { offset } = field
|
||||
|
||||
field.pack(
|
||||
value,
|
||||
buf,
|
||||
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
|
||||
)
|
||||
}
|
||||
|
||||
const unpackField = (field, buf) => {
|
||||
const { offset } = field
|
||||
|
||||
return field.unpack(
|
||||
buf,
|
||||
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
|
||||
)
|
||||
}
|
||||
// ===================================================================
|
||||
|
||||
// Returns the checksum of a raw struct.
|
||||
// The raw struct (footer or header) is altered with the new sum.
|
||||
function checksumStruct (buf, struct) {
|
||||
const checksumField = struct.fields.checksum
|
||||
let sum = 0
|
||||
|
||||
// Do not use the stored checksum to compute the new checksum.
|
||||
const checksumOffset = checksumField.offset
|
||||
for (let i = 0, n = checksumOffset; i < n; ++i) {
|
||||
sum += buf[i]
|
||||
}
|
||||
for (
|
||||
let i = checksumOffset + checksumField.size, n = struct.size;
|
||||
i < n;
|
||||
++i
|
||||
) {
|
||||
sum += buf[i]
|
||||
}
|
||||
|
||||
sum = ~sum >>> 0
|
||||
|
||||
// Write new sum.
|
||||
packField(checksumField, sum, buf)
|
||||
|
||||
return sum
|
||||
}
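The checksum rule implemented here (and kept, per the imports above, in `./_structs`) can be restated standalone: sum every byte of the raw footer or header except the 4-byte checksum field itself, then take the one's complement as an unsigned 32-bit value. A minimal sketch of just that rule:

```js
// buf: raw footer or header, checksumOffset: byte offset of the uint32 checksum field
function vhdChecksum (buf, checksumOffset) {
  let sum = 0
  for (let i = 0, n = buf.length; i < n; ++i) {
    if (i < checksumOffset || i >= checksumOffset + 4) {
      sum += buf[i]
    }
  }
  return ~sum >>> 0 // one's complement, forced to unsigned 32 bits
}
```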
|
||||
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
|
||||
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
|
||||
|
||||
const assertChecksum = (name, buf, struct) => {
|
||||
const actual = unpackField(struct.fields.checksum, buf)
|
||||
@@ -187,6 +51,10 @@ const assertChecksum = (name, buf, struct) => {
|
||||
}
|
||||
}
|
||||
|
||||
// unused block as buffer containing a uint32BE
|
||||
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
|
||||
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// Format:
|
||||
@@ -210,7 +78,8 @@ const assertChecksum = (name, buf, struct) => {
|
||||
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
|
||||
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
|
||||
// - sectorSize = 512
|
||||
export class Vhd {
|
||||
|
||||
export default class Vhd {
|
||||
get batSize () {
|
||||
return computeBatSize(this.header.maxTableEntries)
|
||||
}
|
||||
@@ -232,7 +101,12 @@ export class Vhd {
|
||||
}
|
||||
|
||||
_read (start, n) {
|
||||
return this._readStream(start, n).then(streamToBuffer)
|
||||
return this._readStream(start, n)
|
||||
.then(getStream.buffer)
|
||||
.then(buf => {
|
||||
assert.equal(buf.length, n)
|
||||
return buf
|
||||
})
|
||||
}
|
||||
|
||||
containsBlock (id) {
|
||||
@@ -243,15 +117,15 @@ export class Vhd {
|
||||
getEndOfHeaders () {
|
||||
const { header } = this
|
||||
|
||||
let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
|
||||
let end = FOOTER_SIZE + HEADER_SIZE
|
||||
|
||||
// Max(end, block allocation table end)
|
||||
end = Math.max(end, header.tableOffset + this.batSize)
|
||||
|
||||
for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
|
||||
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
|
||||
const entry = header.parentLocatorEntry[i]
|
||||
|
||||
if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
|
||||
if (entry.platformCode !== PLATFORM_NONE) {
|
||||
end = Math.max(
|
||||
end,
|
||||
entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
|
||||
@@ -266,7 +140,7 @@ export class Vhd {
|
||||
|
||||
// Returns the first sector after data.
|
||||
getEndOfData () {
|
||||
let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
|
||||
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
|
||||
|
||||
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
|
||||
const { maxTableEntries } = this.header
|
||||
@@ -283,25 +157,46 @@ export class Vhd {
|
||||
return sectorsToBytes(end)
|
||||
}
|
||||
|
||||
// Get the beginning (footer + header) of a vhd file.
|
||||
async readHeaderAndFooter () {
|
||||
const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
|
||||
const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
|
||||
const bufHeader = buf.slice(VHD_FOOTER_SIZE)
|
||||
// TODO: extract the checks into reusable functions:
|
||||
// - better human reporting
|
||||
// - auto repair if possible
|
||||
async readHeaderAndFooter (checkSecondFooter = true) {
|
||||
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
|
||||
const bufFooter = buf.slice(0, FOOTER_SIZE)
|
||||
const bufHeader = buf.slice(FOOTER_SIZE)
|
||||
|
||||
assertChecksum('footer', bufFooter, fuFooter)
|
||||
assertChecksum('header', bufHeader, fuHeader)
|
||||
|
||||
if (checkSecondFooter) {
|
||||
const size = await this._handler.getSize(this._path)
|
||||
assert(
|
||||
bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
|
||||
'footer1 !== footer2'
|
||||
)
|
||||
}
|
||||
|
||||
const footer = (this.footer = fuFooter.unpack(bufFooter))
|
||||
assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)
|
||||
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
|
||||
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
|
||||
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
|
||||
assert(footer.originalSize <= footer.currentSize)
|
||||
assert(
|
||||
footer.diskType === DISK_TYPE_DIFFERENCING ||
|
||||
footer.diskType === DISK_TYPE_DYNAMIC
|
||||
)
|
||||
|
||||
const header = (this.header = fuHeader.unpack(bufHeader))
|
||||
assert.strictEqual(header.cookie, HEADER_COOKIE)
|
||||
assert.strictEqual(header.dataOffset, undefined)
|
||||
assert.strictEqual(header.headerVersion, HEADER_VERSION)
|
||||
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
|
||||
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
|
||||
|
||||
// Compute the number of sectors in one block.
|
||||
// Default: One block contains 4096 sectors of 512 bytes.
|
||||
const sectorsPerBlock = (this.sectorsPerBlock = Math.floor(
|
||||
header.blockSize / VHD_SECTOR_SIZE
|
||||
))
|
||||
const sectorsPerBlock = (this.sectorsPerBlock =
|
||||
header.blockSize / SECTOR_SIZE)
|
||||
|
||||
// Compute bitmap size in sectors.
|
||||
// Default: 1.
|
||||
@@ -317,23 +212,18 @@ export class Vhd {
|
||||
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
|
||||
}
|
||||
|
||||
// Check if a vhd object has a block allocation table.
|
||||
hasBlockAllocationTableMap () {
|
||||
return this.footer.fileFormatVersion > getVhdVersion(1, 0)
|
||||
}
|
||||
|
||||
// Returns a buffer that contains the block allocation table of a vhd file.
|
||||
async readBlockTable () {
|
||||
async readBlockAllocationTable () {
|
||||
const { header } = this
|
||||
this.blockTable = await this._read(
|
||||
header.tableOffset,
|
||||
header.maxTableEntries * VHD_ENTRY_SIZE
|
||||
header.maxTableEntries * 4
|
||||
)
|
||||
}
|
||||
|
||||
// return the first sector (bitmap) of a block
|
||||
_getBatEntry (block) {
|
||||
return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
|
||||
return this.blockTable.readUInt32BE(block * 4)
|
||||
}
|
||||
|
||||
_readBlock (blockId, onlyBitmap = false) {
|
||||
@@ -350,11 +240,11 @@ export class Vhd {
|
||||
onlyBitmap
|
||||
? { id: blockId, bitmap: buf }
|
||||
: {
|
||||
id: blockId,
|
||||
bitmap: buf.slice(0, this.bitmapSize),
|
||||
data: buf.slice(this.bitmapSize),
|
||||
buffer: buf,
|
||||
}
|
||||
id: blockId,
|
||||
bitmap: buf.slice(0, this.bitmapSize),
|
||||
data: buf.slice(this.bitmapSize),
|
||||
buffer: buf,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
@@ -371,7 +261,7 @@ export class Vhd {
|
||||
// get first allocated block for initialization
|
||||
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
|
||||
i += 1
|
||||
j += VHD_ENTRY_SIZE
|
||||
j += 4
|
||||
|
||||
if (i === n) {
|
||||
const error = new Error('no allocated block found')
|
||||
@@ -395,7 +285,7 @@ export class Vhd {
|
||||
}
|
||||
|
||||
i += 1
|
||||
j += VHD_ENTRY_SIZE
|
||||
j += 4
|
||||
}
|
||||
|
||||
return { first, firstSector, last, lastSector }
|
||||
@@ -419,9 +309,9 @@ export class Vhd {
|
||||
})
|
||||
return Buffer.isBuffer(data)
|
||||
? new Promise((resolve, reject) => {
|
||||
stream.on('error', reject)
|
||||
stream.end(data, resolve)
|
||||
})
|
||||
stream.on('error', reject)
|
||||
stream.end(data, resolve)
|
||||
})
|
||||
: fromEvent(data.pipe(stream), 'finish')
|
||||
}
|
||||
|
||||
@@ -431,7 +321,7 @@ export class Vhd {
|
||||
const tableOffset = this.header.tableOffset
|
||||
const { batSize } = this
|
||||
const newMinSector = Math.ceil(
|
||||
(tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
|
||||
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
|
||||
)
|
||||
if (
|
||||
tableOffset + batSize + spaceNeededBytes >=
|
||||
@@ -439,7 +329,7 @@ export class Vhd {
|
||||
) {
|
||||
const { fullBlockSize } = this
|
||||
const newFirstSector = Math.max(
|
||||
lastSector + fullBlockSize / VHD_SECTOR_SIZE,
|
||||
lastSector + fullBlockSize / SECTOR_SIZE,
|
||||
newMinSector
|
||||
)
|
||||
debug(
|
||||
@@ -478,7 +368,7 @@ export class Vhd {
|
||||
const prevBat = this.blockTable
|
||||
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
|
||||
prevBat.copy(bat)
|
||||
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
|
||||
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
|
||||
debug(
|
||||
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
|
||||
)
|
||||
@@ -491,21 +381,18 @@ export class Vhd {
|
||||
|
||||
// set the first sector (bitmap) of a block
|
||||
_setBatEntry (block, blockSector) {
|
||||
const i = block * VHD_ENTRY_SIZE
|
||||
const i = block * 4
|
||||
const { blockTable } = this
|
||||
|
||||
blockTable.writeUInt32BE(blockSector, i)
|
||||
|
||||
return this._write(
|
||||
blockTable.slice(i, i + VHD_ENTRY_SIZE),
|
||||
this.header.tableOffset + i
|
||||
)
|
||||
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
|
||||
}
|
||||
|
||||
// Make a new empty block at vhd end.
|
||||
// Update block allocation table in context and in file.
|
||||
async createBlock (blockId) {
|
||||
const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)
|
||||
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
|
||||
|
||||
debug(`create block ${blockId} at ${blockAddr}`)
|
||||
|
||||
@@ -647,7 +534,7 @@ export class Vhd {
|
||||
const { header } = this
|
||||
const rawHeader = fuHeader.pack(header)
|
||||
header.checksum = checksumStruct(rawHeader, fuHeader)
|
||||
const offset = VHD_FOOTER_SIZE
|
||||
const offset = FOOTER_SIZE
|
||||
debug(
|
||||
`Write header at: ${offset} (checksum=${
|
||||
header.checksum
|
||||
@@ -657,12 +544,12 @@ export class Vhd {
|
||||
}
|
||||
|
||||
async writeData (offsetSectors, buffer) {
|
||||
const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
|
||||
const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
|
||||
const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
|
||||
const endBufferSectors = offsetSectors + bufferSizeSectors
|
||||
const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
|
||||
await this.ensureBatSize(lastBlock)
|
||||
const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
|
||||
const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
|
||||
const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
|
||||
offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock
|
||||
|
||||
@@ -681,11 +568,11 @@ export class Vhd {
|
||||
)
|
||||
const startInBuffer = Math.max(
|
||||
0,
|
||||
(currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
|
||||
(currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
|
||||
)
|
||||
const endInBuffer = Math.min(
|
||||
((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
|
||||
VHD_SECTOR_SIZE,
|
||||
SECTOR_SIZE,
|
||||
buffer.length
|
||||
)
|
||||
let inputBuffer
|
||||
@@ -695,7 +582,7 @@ export class Vhd {
|
||||
inputBuffer = Buffer.alloc(blockSizeBytes, 0)
|
||||
buffer.copy(
|
||||
inputBuffer,
|
||||
offsetInBlockSectors * VHD_SECTOR_SIZE,
|
||||
offsetInBlockSectors * SECTOR_SIZE,
|
||||
startInBuffer,
|
||||
endInBuffer
|
||||
)
|
||||
@@ -710,10 +597,10 @@ export class Vhd {
|
||||
}
|
||||
|
||||
async ensureSpaceForParentLocators (neededSectors) {
|
||||
const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
|
||||
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
|
||||
const currentSpace =
|
||||
Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
|
||||
firstLocatorOffset / VHD_SECTOR_SIZE
|
||||
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
|
||||
firstLocatorOffset / SECTOR_SIZE
|
||||
if (currentSpace < neededSectors) {
|
||||
const deltaSectors = neededSectors - currentSpace
|
||||
await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
|
||||
@@ -722,269 +609,23 @@ export class Vhd {
|
||||
}
|
||||
return firstLocatorOffset
|
||||
}
|
||||
}
|
||||
|
||||
// Merge vhd child into vhd parent.
|
||||
//
|
||||
// Child must be a delta backup !
|
||||
// Parent must be a full backup !
|
||||
//
|
||||
// TODO: update the identifier of the parent VHD.
|
||||
export default concurrency(2)(async function vhdMerge (
|
||||
parentHandler,
|
||||
parentPath,
|
||||
childHandler,
|
||||
childPath
|
||||
) {
|
||||
const parentFd = await parentHandler.openFile(parentPath, 'r+')
|
||||
try {
|
||||
const parentVhd = new Vhd(parentHandler, parentFd)
|
||||
const childFd = await childHandler.openFile(childPath, 'r')
|
||||
try {
|
||||
const childVhd = new Vhd(childHandler, childFd)
|
||||
|
||||
// Reading footer and header.
|
||||
await Promise.all([
|
||||
parentVhd.readHeaderAndFooter(),
|
||||
childVhd.readHeaderAndFooter(),
|
||||
])
|
||||
|
||||
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
|
||||
|
||||
// Child must be a delta.
|
||||
if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
|
||||
throw new Error('Unable to merge, child is not a delta backup.')
|
||||
}
|
||||
|
||||
// Allocation table map is not yet implemented.
|
||||
if (
|
||||
parentVhd.hasBlockAllocationTableMap() ||
|
||||
childVhd.hasBlockAllocationTableMap()
|
||||
) {
|
||||
throw new Error('Unsupported allocation table map.')
|
||||
}
|
||||
|
||||
// Read allocation table of child/parent.
|
||||
await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])
|
||||
|
||||
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
|
||||
|
||||
let mergedDataSize = 0
|
||||
for (
|
||||
let blockId = 0;
|
||||
blockId < childVhd.header.maxTableEntries;
|
||||
blockId++
|
||||
) {
|
||||
if (childVhd.containsBlock(blockId)) {
|
||||
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
|
||||
}
|
||||
}
|
||||
|
||||
const cFooter = childVhd.footer
|
||||
const pFooter = parentVhd.footer
|
||||
|
||||
pFooter.currentSize = cFooter.currentSize
|
||||
pFooter.diskGeometry = { ...cFooter.diskGeometry }
|
||||
pFooter.originalSize = cFooter.originalSize
|
||||
pFooter.timestamp = cFooter.timestamp
|
||||
pFooter.uuid = cFooter.uuid
|
||||
|
||||
// necessary to update values and to recreate the footer after block
|
||||
// creation
|
||||
await parentVhd.writeFooter()
|
||||
|
||||
return mergedDataSize
|
||||
} finally {
|
||||
await childHandler.closeFile(childFd)
|
||||
async setUniqueParentLocator (fileNameString) {
|
||||
const { header } = this
|
||||
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
|
||||
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
|
||||
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
|
||||
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
|
||||
await this._write(encodedFilename, position)
|
||||
header.parentLocatorEntry[0].platformDataSpace =
|
||||
dataSpaceSectors * SECTOR_SIZE
|
||||
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
|
||||
header.parentLocatorEntry[0].platformDataOffset = position
|
||||
for (let i = 1; i < 8; i++) {
|
||||
header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
|
||||
header.parentLocatorEntry[i].platformDataSpace = 0
|
||||
header.parentLocatorEntry[i].platformDataLength = 0
|
||||
header.parentLocatorEntry[i].platformDataOffset = 0
|
||||
}
|
||||
} finally {
|
||||
await parentHandler.closeFile(parentFd)
|
||||
}
|
||||
})
|
||||
|
||||
// returns true if the child was actually modified
|
||||
export async function chainVhd (
|
||||
parentHandler,
|
||||
parentPath,
|
||||
childHandler,
|
||||
childPath,
|
||||
force = false
|
||||
) {
|
||||
const parentVhd = new Vhd(parentHandler, parentPath)
|
||||
const childVhd = new Vhd(childHandler, childPath)
|
||||
|
||||
await childVhd.readHeaderAndFooter()
|
||||
const { header, footer } = childVhd
|
||||
|
||||
if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
|
||||
if (!force) {
|
||||
throw new Error('cannot chain disk of type ' + footer.diskType)
|
||||
}
|
||||
footer.diskType = HARD_DISK_TYPE_DIFFERENCING
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
childVhd.readBlockTable(),
|
||||
parentVhd.readHeaderAndFooter(),
|
||||
])
|
||||
|
||||
const parentName = relative(dirname(childPath), parentPath)
|
||||
|
||||
header.parentUuid = parentVhd.footer.uuid
|
||||
header.parentUnicodeName = parentName
|
||||
|
||||
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
|
||||
const encodedFilename = Buffer.from(parentName, 'utf16le')
|
||||
const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
|
||||
const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
|
||||
await childVhd._write(encodedFilename, position)
|
||||
header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
|
||||
dataSpaceSectors
|
||||
)
|
||||
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
|
||||
header.parentLocatorEntry[0].platformDataOffset = position
|
||||
for (let i = 1; i < 8; i++) {
|
||||
header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
|
||||
header.parentLocatorEntry[i].platformDataSpace = 0
|
||||
header.parentLocatorEntry[i].platformDataLength = 0
|
||||
header.parentLocatorEntry[i].platformDataOffset = 0
|
||||
}
|
||||
await childVhd.writeHeader()
|
||||
await childVhd.writeFooter()
|
||||
return true
|
||||
}
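chainVhd keeps the same call shape after the move to `./chain`, as the updated integration test above shows; a small sketch (local handler and file names are illustrative, the final `true` forces the child to become a differencing disk):

```js
import { getHandler } from '@xen-orchestra/fs'
import { chainVhd } from 'vhd-lib'

async function chainOntoParent () {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  // rewrites the child's header/footer so it points at parent.vhd
  await chainVhd(handler, 'parent.vhd', handler, 'child.vhd', true)
}
```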
|
||||
|
||||
export const createReadStream = asyncIteratorToStream(function * (handler, path) {
|
||||
const fds = []
|
||||
|
||||
try {
|
||||
const vhds = []
|
||||
while (true) {
|
||||
const fd = yield handler.openFile(path, 'r')
|
||||
fds.push(fd)
|
||||
const vhd = new Vhd(handler, fd)
|
||||
vhds.push(vhd)
|
||||
yield vhd.readHeaderAndFooter()
|
||||
yield vhd.readBlockTable()
|
||||
|
||||
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
|
||||
break
|
||||
}
|
||||
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
const nVhds = vhds.length
|
||||
|
||||
// this is the VHD we want to synthesize
|
||||
const vhd = vhds[0]
|
||||
|
||||
// data of our synthetic VHD
|
||||
// TODO: empty parentUuid and parentLocatorEntry-s in header
|
||||
let header = {
|
||||
...vhd.header,
|
||||
tableOffset: 512 + 1024,
|
||||
parentUnicodeName: '',
|
||||
}
|
||||
|
||||
const bat = Buffer.allocUnsafe(
|
||||
Math.ceil(4 * header.maxTableEntries / VHD_SECTOR_SIZE) * VHD_SECTOR_SIZE
|
||||
)
|
||||
let footer = {
|
||||
...vhd.footer,
|
||||
diskType: HARD_DISK_TYPE_DYNAMIC,
|
||||
}
|
||||
const sectorsPerBlockData = vhd.sectorsPerBlock
|
||||
const sectorsPerBlock =
|
||||
sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE
|
||||
|
||||
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
|
||||
|
||||
const blocksOwner = new Array(nBlocks)
|
||||
for (
|
||||
let iBlock = 0,
|
||||
blockOffset = Math.ceil((512 + 1024 + bat.length) / VHD_SECTOR_SIZE);
|
||||
iBlock < nBlocks;
|
||||
++iBlock
|
||||
) {
|
||||
let blockSector = BLOCK_UNUSED
|
||||
for (let i = 0; i < nVhds; ++i) {
|
||||
if (vhds[i].containsBlock(iBlock)) {
|
||||
blocksOwner[iBlock] = i
|
||||
blockSector = blockOffset
|
||||
blockOffset += sectorsPerBlock
|
||||
break
|
||||
}
|
||||
}
|
||||
bat.writeUInt32BE(blockSector, iBlock * 4)
|
||||
}
|
||||
|
||||
footer = fuFooter.pack(footer)
|
||||
checksumStruct(footer, fuFooter)
|
||||
yield footer
|
||||
|
||||
header = fuHeader.pack(header)
|
||||
checksumStruct(header, fuHeader)
|
||||
yield header
|
||||
|
||||
yield bat
|
||||
|
||||
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
|
||||
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
|
||||
const owner = blocksOwner[iBlock]
|
||||
if (owner === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield bitmap
|
||||
|
||||
const blocksByVhd = new Map()
|
||||
const emitBlockSectors = function * (iVhd, i, n) {
|
||||
const vhd = vhds[iVhd]
|
||||
if (!vhd.containsBlock(iBlock)) {
|
||||
yield * emitBlockSectors(iVhd + 1, i, n)
|
||||
return
|
||||
}
|
||||
let block = blocksByVhd.get(vhd)
|
||||
if (block === undefined) {
|
||||
block = yield vhd._readBlock(iBlock)
|
||||
blocksByVhd.set(vhd, block)
|
||||
}
|
||||
const { bitmap, data } = block
|
||||
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
|
||||
yield data.slice(i * VHD_SECTOR_SIZE, n * VHD_SECTOR_SIZE)
|
||||
return
|
||||
}
|
||||
while (i < n) {
|
||||
const hasData = mapTestBit(bitmap, i)
|
||||
const start = i
|
||||
do {
|
||||
++i
|
||||
} while (i < n && mapTestBit(bitmap, i) === hasData)
|
||||
if (hasData) {
|
||||
yield data.slice(start * VHD_SECTOR_SIZE, i * VHD_SECTOR_SIZE)
|
||||
} else {
|
||||
yield * emitBlockSectors(iVhd + 1, start, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
yield * emitBlockSectors(owner, 0, sectorsPerBlock)
|
||||
}
|
||||
|
||||
yield footer
|
||||
} finally {
|
||||
for (let i = 0, n = fds.length; i < n; ++i) {
|
||||
handler.closeFile(fds[i]).catch(error => {
|
||||
console.warn('createReadStream, closeFd', i, error)
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
export async function readVhdMetadata (handler: RemoteHandler, path: string) {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
return {
|
||||
footer: vhd.footer,
|
||||
header: vhd.header,
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xen-api",
|
||||
"version": "0.16.8",
|
||||
"version": "0.16.9",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
|
||||
@@ -182,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const getTaskResult = (task, onSuccess, onFailure) => {
|
||||
const getTaskResult = task => {
|
||||
const { status } = task
|
||||
if (status === 'cancelled') {
|
||||
return [onFailure(new Cancel('task canceled'))]
|
||||
return Promise.reject(new Cancel('task canceled'))
|
||||
}
|
||||
if (status === 'failure') {
|
||||
return [onFailure(wrapError(task.error_info))]
|
||||
return Promise.reject(wrapError(task.error_info))
|
||||
}
|
||||
if (status === 'success') {
|
||||
// the result might be:
|
||||
// - empty string
|
||||
// - an opaque reference
|
||||
// - an XML-RPC value
|
||||
return [onSuccess(task.result)]
|
||||
return Promise.resolve(task.result)
|
||||
}
|
||||
}
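The rework changes the contract: instead of invoking onSuccess/onFailure callbacks and returning a one-element array, the helper now returns an already-settled promise, or `undefined` when the task is still pending. A hypothetical wrapper, only to show the intended control flow:

```js
function getSettledTaskResult (task) {
  const result = getTaskResult(task)
  if (result !== undefined) {
    // already settled: rejected for 'cancelled'/'failure', resolved for 'success'
    return result
  }
  // undefined: the task is still pending, a watcher has to be registered
}
```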
|
||||
|
||||
@@ -246,7 +246,7 @@ export class Xapi extends EventEmitter {
|
||||
objects.getKey = getKey
|
||||
|
||||
this._objectsByRefs = createObject(null)
|
||||
this._objectsByRefs['OpaqueRef:NULL'] = null
|
||||
this._objectsByRefs['OpaqueRef:NULL'] = undefined
|
||||
|
||||
this._taskWatchers = Object.create(null)
|
||||
|
||||
@@ -642,11 +642,11 @@ export class Xapi extends EventEmitter {
|
||||
let watcher = watchers[ref]
|
||||
if (watcher === undefined) {
|
||||
// sync check if the task is already settled
|
||||
const task = this.objects.all[ref]
|
||||
const task = this._objectsByRefs[ref]
|
||||
if (task !== undefined) {
|
||||
const result = getTaskResult(task, Promise.resolve, Promise.reject)
|
||||
if (result) {
|
||||
return result[0]
|
||||
const result = getTaskResult(task)
|
||||
if (result !== undefined) {
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
@@ -793,11 +793,12 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
const taskWatchers = this._taskWatchers
|
||||
const taskWatcher = taskWatchers[ref]
|
||||
if (
|
||||
taskWatcher !== undefined &&
|
||||
getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
|
||||
) {
|
||||
delete taskWatchers[ref]
|
||||
if (taskWatcher !== undefined) {
|
||||
const result = getTaskResult(object)
|
||||
if (result !== undefined) {
|
||||
taskWatcher.resolve(result)
|
||||
delete taskWatchers[ref]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/polyfill": "7.0.0-beta.44",
|
||||
"@babel/polyfill": "7.0.0-beta.46",
|
||||
"bluebird": "^3.5.1",
|
||||
"chalk": "^2.2.0",
|
||||
"event-to-promise": "^0.8.0",
|
||||
@@ -49,10 +49,10 @@
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.44",
|
||||
"@babel/core": "7.0.0-beta.44",
|
||||
"@babel/preset-env": "7.0.0-beta.44",
|
||||
"@babel/preset-flow": "7.0.0-beta.44",
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-saml",
|
||||
"version": "0.5.1",
|
||||
"version": "0.5.2",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "SAML authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
|
||||
@@ -52,6 +52,7 @@ class AuthSamlXoPlugin {
|
||||
new Strategy(this._conf, async (profile, done) => {
|
||||
const name = profile[this._usernameField]
|
||||
if (!name) {
|
||||
console.warn('xo-server-auth-saml:', profile)
|
||||
done('no name found for this user')
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-backup-reports",
|
||||
"version": "0.10.0",
|
||||
"version": "0.11.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Backup reports plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -35,6 +35,7 @@
|
||||
"node": ">=4"
|
||||
},
|
||||
"dependencies": {
|
||||
"babel-runtime": "^6.26.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
"moment-timezone": "^0.5.13"
|
||||
@@ -42,6 +43,7 @@
|
||||
"devDependencies": {
|
||||
"babel-cli": "^6.24.1",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"babel-plugin-transform-runtime": "^6.23.0",
|
||||
"babel-preset-env": "^1.5.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
@@ -56,7 +58,8 @@
|
||||
},
|
||||
"babel": {
|
||||
"plugins": [
|
||||
"lodash"
|
||||
"lodash",
|
||||
"transform-runtime"
|
||||
],
|
||||
"presets": [
|
||||
[
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import humanFormat from 'human-format'
|
||||
import moment from 'moment-timezone'
|
||||
import { forEach, startCase } from 'lodash'
|
||||
import { find, forEach, get, startCase } from 'lodash'
|
||||
|
||||
import pkg from '../package'
|
||||
|
||||
@@ -41,9 +41,9 @@ const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
|
||||
const createDateFormater = timezone =>
|
||||
timezone !== undefined
|
||||
? timestamp =>
|
||||
moment(timestamp)
|
||||
.tz(timezone)
|
||||
.format(DATE_FORMAT)
|
||||
moment(timestamp)
|
||||
.tz(timezone)
|
||||
.format(DATE_FORMAT)
|
||||
: timestamp => moment(timestamp).format(DATE_FORMAT)
|
||||
|
||||
const formatDuration = milliseconds => moment.duration(milliseconds).humanize()
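For illustration only (outputs are examples, not captured values):

```js
const formatDate = createDateFormater('Europe/Paris')
formatDate(Date.now()) // e.g. "Friday, May 4th 2018, 3:07:12 pm"
formatDuration(90 * 60 * 1000) // "2 hours" (moment's humanized duration)
```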
|
||||
@@ -66,6 +66,7 @@ const logError = e => {
|
||||
console.error('backup report error:', e)
|
||||
}
|
||||
|
||||
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
|
||||
const NO_SUCH_OBJECT_ERROR = 'no such object'
|
||||
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
|
||||
const UNHEALTHY_VDI_CHAIN_MESSAGE =
|
||||
@@ -94,14 +95,351 @@ class BackupReportsXoPlugin {
|
||||
this._xo.removeListener('job:terminated', this._report)
|
||||
}
|
||||
|
||||
_wrapper (status) {
|
||||
return new Promise(resolve => resolve(this._listener(status))).catch(
|
||||
logError
|
||||
)
|
||||
_wrapper (status, job, schedule) {
|
||||
return new Promise(resolve =>
|
||||
resolve(
|
||||
job.type === 'backup'
|
||||
? this._backupNgListener(status, job, schedule)
|
||||
: this._listener(status, job, schedule)
|
||||
)
|
||||
).catch(logError)
|
||||
}
|
||||
|
||||
async _backupNgListener (runJobId, _, { timezone }) {
|
||||
const xo = this._xo
|
||||
const logs = await xo.getBackupNgLogs(runJobId)
|
||||
const jobLog = logs['roots'][0]
|
||||
const vmsTaskLog = logs[jobLog.id]
|
||||
|
||||
const { reportWhen, mode } = jobLog.data || {}
|
||||
if (reportWhen === 'never') {
|
||||
return
|
||||
}
|
||||
|
||||
const formatDate = createDateFormater(timezone)
|
||||
const jobName = (await xo.getJob(jobLog.jobId, 'backup')).name
|
||||
|
||||
if (jobLog.error !== undefined) {
|
||||
const [globalStatus, icon] =
|
||||
jobLog.error.message === NO_VMS_MATCH_THIS_PATTERN
|
||||
? ['Skipped', ICON_SKIPPED]
|
||||
: ['Failure', ICON_FAILURE]
|
||||
let markdown = [
|
||||
`## Global status: ${globalStatus}`,
|
||||
'',
|
||||
`- **mode**: ${mode}`,
|
||||
`- **Start time**: ${formatDate(jobLog.start)}`,
|
||||
`- **End time**: ${formatDate(jobLog.end)}`,
|
||||
`- **Duration**: ${formatDuration(jobLog.duration)}`,
|
||||
`- **Error**: ${jobLog.error.message}`,
|
||||
'---',
|
||||
'',
|
||||
`*${pkg.name} v${pkg.version}*`,
|
||||
]
|
||||
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${jobName} ${icon}`,
|
||||
markdown,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Backup report for ${jobName} - Error : ${
|
||||
jobLog.error.message
|
||||
}`,
|
||||
})
|
||||
}
|
||||
|
||||
const failedVmsText = []
|
||||
const skippedVmsText = []
|
||||
const successfulVmsText = []
|
||||
const nagiosText = []
|
||||
|
||||
let globalMergeSize = 0
|
||||
let globalTransferSize = 0
|
||||
let nFailures = 0
|
||||
let nSkipped = 0
|
||||
|
||||
for (const vmTaskLog of vmsTaskLog || []) {
|
||||
const vmTaskStatus = vmTaskLog.status
|
||||
if (vmTaskStatus === 'success' && reportWhen === 'failure') {
|
||||
return
|
||||
}
|
||||
|
||||
const vmId = vmTaskLog.data.id
|
||||
let vm
|
||||
try {
|
||||
vm = xo.getObject(vmId)
|
||||
} catch (e) {}
|
||||
const text = [
|
||||
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
|
||||
'',
|
||||
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
|
||||
`- **Start time**: ${formatDate(vmTaskLog.start)}`,
|
||||
`- **End time**: ${formatDate(vmTaskLog.end)}`,
|
||||
`- **Duration**: ${formatDuration(vmTaskLog.duration)}`,
|
||||
]
|
||||
|
||||
const failedSubTasks = []
|
||||
const operationsText = []
|
||||
const srsText = []
|
||||
const remotesText = []
|
||||
for (const subTaskLog of logs[vmTaskLog.taskId] || []) {
|
||||
const { data, status, result, message } = subTaskLog
|
||||
const icon =
|
||||
subTaskLog.status === 'success' ? ICON_SUCCESS : ICON_FAILURE
|
||||
const errorMessage = ` **Error**: ${get(result, 'message')}`
|
||||
|
||||
if (message === 'snapshot') {
|
||||
operationsText.push(`- **Snapshot** ${icon}`)
|
||||
if (status === 'failure') {
|
||||
failedSubTasks.push('Snapshot')
|
||||
operationsText.push('', errorMessage)
|
||||
}
|
||||
} else if (data.type === 'remote') {
|
||||
const remoteId = data.id
|
||||
const remote = await xo.getRemote(remoteId).catch(() => {})
|
||||
remotesText.push(
|
||||
`- **${
|
||||
remote !== undefined ? remote.name : `Remote Not found`
|
||||
}** (${remoteId}) ${icon}`
|
||||
)
|
||||
if (status === 'failure') {
|
||||
failedSubTasks.push(remote !== undefined ? remote.name : remoteId)
|
||||
remotesText.push('', errorMessage)
|
||||
}
|
||||
} else {
|
||||
const srId = data.id
|
||||
let sr
|
||||
try {
|
||||
sr = xo.getObject(srId)
|
||||
} catch (e) {}
|
||||
const [srName, srUuid] =
|
||||
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, srId]
|
||||
srsText.push(`- **${srName}** (${srUuid}) ${icon}`)
|
||||
if (status === 'failure') {
|
||||
failedSubTasks.push(sr !== undefined ? sr.name_label : srId)
|
||||
srsText.push('', errorMessage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (operationsText.length !== 0) {
|
||||
operationsText.unshift(`#### Operations`, '')
|
||||
}
|
||||
if (srsText.length !== 0) {
|
||||
srsText.unshift(`#### SRs`, '')
|
||||
}
|
||||
if (remotesText.length !== 0) {
|
||||
remotesText.unshift(`#### Remotes`, '')
|
||||
}
|
||||
const subText = [...operationsText, '', ...srsText, '', ...remotesText]
|
||||
const result = vmTaskLog.result
|
||||
if (vmTaskStatus === 'failure' && result !== undefined) {
|
||||
const { message } = result
|
||||
if (isSkippedError(result)) {
|
||||
++nSkipped
|
||||
skippedVmsText.push(
|
||||
...text,
|
||||
`- **Reason**: ${
|
||||
message === UNHEALTHY_VDI_CHAIN_ERROR
|
||||
? UNHEALTHY_VDI_CHAIN_MESSAGE
|
||||
: message
|
||||
}`,
|
||||
''
|
||||
)
|
||||
nagiosText.push(
|
||||
`[(Skipped) ${
|
||||
vm !== undefined ? vm.name_label : 'undefined'
|
||||
} : ${message} ]`
|
||||
)
|
||||
} else {
|
||||
++nFailures
|
||||
failedVmsText.push(...text, `- **Error**: ${message}`, '')
|
||||
|
||||
nagiosText.push(
|
||||
`[(Failed) ${
|
||||
vm !== undefined ? vm.name_label : 'undefined'
|
||||
} : ${message} ]`
|
||||
)
|
||||
}
|
||||
} else {
|
||||
let transferSize, transferDuration, mergeSize, mergeDuration
|
||||
|
||||
forEach(logs[vmTaskLog.taskId], ({ taskId }) => {
|
||||
if (transferSize !== undefined) {
|
||||
return false
|
||||
}
|
||||
|
||||
const transferTask = find(logs[taskId], { message: 'transfer' })
|
||||
if (transferTask !== undefined) {
|
||||
transferSize = transferTask.result.size
|
||||
transferDuration = transferTask.end - transferTask.start
|
||||
}
|
||||
|
||||
const mergeTask = find(logs[taskId], { message: 'merge' })
|
||||
if (mergeTask !== undefined) {
|
||||
mergeSize = mergeTask.result.size
|
||||
mergeDuration = mergeTask.end - mergeTask.start
|
||||
}
|
||||
})
|
||||
if (transferSize !== undefined) {
|
||||
globalTransferSize += transferSize
|
||||
text.push(
|
||||
`- **Transfer size**: ${formatSize(transferSize)}`,
|
||||
`- **Transfer speed**: ${formatSpeed(
|
||||
transferSize,
|
||||
transferDuration
|
||||
)}`
|
||||
)
|
||||
}
|
||||
if (mergeSize !== undefined) {
|
||||
globalMergeSize += mergeSize
|
||||
text.push(
|
||||
`- **Merge size**: ${formatSize(mergeSize)}`,
|
||||
`- **Merge speed**: ${formatSpeed(mergeSize, mergeDuration)}`
|
||||
)
|
||||
}
|
||||
if (vmTaskStatus === 'failure') {
|
||||
++nFailures
|
||||
failedVmsText.push(...text, '', '', ...subText, '')
|
||||
nagiosText.push(
|
||||
`[(Failed) ${
|
||||
vm !== undefined ? vm.name_label : 'undefined'
|
||||
}: (failed)[${failedSubTasks.toString()}]]`
|
||||
)
|
||||
} else {
|
||||
successfulVmsText.push(...text, '', '', ...subText, '')
|
||||
}
|
||||
}
|
||||
}
|
||||
const globalSuccess = nFailures === 0 && nSkipped === 0
|
||||
if (reportWhen === 'failure' && globalSuccess) {
|
||||
return
|
||||
}
|
||||
|
||||
const nVms = vmsTaskLog.length
|
||||
const nSuccesses = nVms - nFailures - nSkipped
|
||||
const globalStatus = globalSuccess
|
||||
? `Success`
|
||||
: nFailures !== 0 ? `Failure` : `Skipped`
|
||||
let markdown = [
|
||||
`## Global status: ${globalStatus}`,
|
||||
'',
|
||||
`- **mode**: ${mode}`,
|
||||
`- **Start time**: ${formatDate(jobLog.start)}`,
|
||||
`- **End time**: ${formatDate(jobLog.end)}`,
|
||||
`- **Duration**: ${formatDuration(jobLog.duration)}`,
|
||||
`- **Successes**: ${nSuccesses} / ${nVms}`,
|
||||
]
|
||||
|
||||
if (globalTransferSize !== 0) {
|
||||
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
|
||||
}
|
||||
if (globalMergeSize !== 0) {
|
||||
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
|
||||
}
|
||||
markdown.push('')
|
||||
|
||||
if (nFailures !== 0) {
|
||||
markdown.push(
|
||||
'---',
|
||||
'',
|
||||
`## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`,
|
||||
'',
|
||||
...failedVmsText
|
||||
)
|
||||
}
|
||||
|
||||
if (nSkipped !== 0) {
|
||||
markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
|
||||
}
|
||||
|
||||
if (nSuccesses !== 0 && reportWhen !== 'failure') {
|
||||
markdown.push(
|
||||
'---',
|
||||
'',
|
||||
`## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`,
|
||||
'',
|
||||
...successfulVmsText
|
||||
)
|
||||
}
|
||||
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
markdown,
|
||||
subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${jobName} ${
|
||||
globalSuccess
|
||||
? ICON_SUCCESS
|
||||
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
|
||||
}`,
|
||||
nagiosStatus: globalSuccess ? 0 : 2,
|
||||
nagiosMarkdown: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
|
||||
: `[Xen Orchestra] [${
|
||||
nFailures !== 0 ? 'Failure' : 'Skipped'
|
||||
}] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
|
||||
})
|
||||
}
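The log object returned by `getBackupNgLogs` is essentially a task tree flattened into a map: a `roots` entry for the job, and for every task id a list of its child tasks. A small sketch with made-up ids and sizes, showing the traversal used above to pull out the transfer size and speed:

```
// Hypothetical shape of the structure walked by _backupNgListener.
const logs = {
  roots: [{ id: 'job1', jobId: 'backup-job', start: 0, end: 90e3 }],
  job1: [{ taskId: 'vm1', data: { id: 'vm-uuid' }, status: 'success', start: 0, end: 60e3 }],
  vm1: [{ taskId: 'target1', data: { type: 'remote', id: 'remote-uuid' }, status: 'success' }],
  target1: [
    { message: 'transfer', result: { size: 1024 ** 3 }, start: 0, end: 30e3 },
    { message: 'merge', result: { size: 512 * 1024 ** 2 }, start: 30e3, end: 40e3 },
  ],
}

const vmTaskLog = logs.job1[0]
let transferSize, transferDuration
for (const { taskId } of logs[vmTaskLog.taskId]) {
  const transferTask = (logs[taskId] || []).find(t => t.message === 'transfer')
  if (transferTask !== undefined) {
    transferSize = transferTask.result.size
    transferDuration = transferTask.end - transferTask.start
    break
  }
}

// ~1 GiB transferred in 30 s ≈ 34 MiB/s
console.log(transferSize / (transferDuration / 1e3) / 1024 ** 2, 'MiB/s')
```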
|
||||
|
||||
_sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
|
||||
const xo = this._xo
|
||||
return Promise.all([
|
||||
xo.sendEmail !== undefined &&
|
||||
xo.sendEmail({
|
||||
to: this._mailsReceivers,
|
||||
subject,
|
||||
markdown,
|
||||
}),
|
||||
xo.sendToXmppClient !== undefined &&
|
||||
xo.sendToXmppClient({
|
||||
to: this._xmppReceivers,
|
||||
message: markdown,
|
||||
}),
|
||||
xo.sendSlackMessage !== undefined &&
|
||||
xo.sendSlackMessage({
|
||||
message: markdown,
|
||||
}),
|
||||
xo.sendPassiveCheck !== undefined &&
|
||||
xo.sendPassiveCheck({
|
||||
nagiosStatus,
|
||||
message: nagiosMarkdown,
|
||||
}),
|
||||
])
|
||||
}
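`_sendReport` fans the same report out to whichever transports happen to be loaded: each `xo.send*` method is called only if it exists, and the `false` produced by a missing one is a perfectly valid `Promise.all` entry. A reduced sketch of that dispatch pattern (the `transports` object is illustrative):

```
// Only some transports may be configured; missing ones are simply skipped.
const transports = {
  sendEmail: async ({ subject }) => console.log('mail:', subject),
  // sendSlackMessage is not loaded in this example
}

const sendReport = ({ subject, markdown }) =>
  Promise.all([
    transports.sendEmail !== undefined &&
      transports.sendEmail({ subject, markdown }),
    transports.sendSlackMessage !== undefined &&
      transports.sendSlackMessage({ message: markdown }),
  ])

sendReport({ subject: 'Backup report', markdown: '## Global status: Success' })
```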
|
||||
|
||||
_listener (status) {
|
||||
const { calls } = status
|
||||
const { calls, timezone, error } = status
|
||||
const formatDate = createDateFormater(timezone)
|
||||
|
||||
if (status.error !== undefined) {
|
||||
const [globalStatus, icon] =
|
||||
error.message === NO_VMS_MATCH_THIS_PATTERN
|
||||
? ['Skipped', ICON_SKIPPED]
|
||||
: ['Failure', ICON_FAILURE]
|
||||
|
||||
let markdown = [
|
||||
`## Global status: ${globalStatus}`,
|
||||
'',
|
||||
`- **Start time**: ${formatDate(status.start)}`,
|
||||
`- **End time**: ${formatDate(status.end)}`,
|
||||
`- **Duration**: ${formatDuration(status.end - status.start)}`,
|
||||
`- **Error**: ${error.message}`,
|
||||
'---',
|
||||
'',
|
||||
`*${pkg.name} v${pkg.version}*`,
|
||||
]
|
||||
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
|
||||
markdown,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
|
||||
error.message
|
||||
}`,
|
||||
})
|
||||
}
|
||||
|
||||
const callIds = Object.keys(calls)
|
||||
|
||||
const nCalls = callIds.length
|
||||
@@ -139,8 +477,6 @@ class BackupReportsXoPlugin {
|
||||
const skippedBackupsText = []
|
||||
const successfulBackupText = []
|
||||
|
||||
const formatDate = createDateFormater(status.timezone)
|
||||
|
||||
forEach(calls, call => {
|
||||
const { id = call.params.vm } = call.params
|
||||
|
||||
@@ -226,9 +562,8 @@ class BackupReportsXoPlugin {
|
||||
return
|
||||
}
|
||||
|
||||
const { end, start } = status
|
||||
const { tag } = oneCall.params
|
||||
const duration = end - start
|
||||
const duration = status.end - status.start
|
||||
const nSuccesses = nCalls - nFailures - nSkipped
|
||||
const globalStatus = globalSuccess
|
||||
? `Success`
|
||||
@@ -238,8 +573,8 @@ class BackupReportsXoPlugin {
|
||||
`## Global status: ${globalStatus}`,
|
||||
'',
|
||||
`- **Type**: ${formatMethod(method)}`,
|
||||
`- **Start time**: ${formatDate(start)}`,
|
||||
`- **End time**: ${formatDate(end)}`,
|
||||
`- **Start time**: ${formatDate(status.start)}`,
|
||||
`- **End time**: ${formatDate(status.end)}`,
|
||||
`- **Duration**: ${formatDuration(duration)}`,
|
||||
`- **Successes**: ${nSuccesses} / ${nCalls}`,
|
||||
]
|
||||
@@ -285,37 +620,20 @@ class BackupReportsXoPlugin {
|
||||
|
||||
markdown = markdown.join('\n')
|
||||
|
||||
const xo = this._xo
|
||||
return Promise.all([
|
||||
xo.sendEmail !== undefined &&
|
||||
xo.sendEmail({
|
||||
to: this._mailsReceivers,
|
||||
subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
|
||||
globalSuccess
|
||||
? ICON_SUCCESS
|
||||
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
|
||||
}`,
|
||||
markdown,
|
||||
}),
|
||||
xo.sendToXmppClient !== undefined &&
|
||||
xo.sendToXmppClient({
|
||||
to: this._xmppReceivers,
|
||||
message: markdown,
|
||||
}),
|
||||
xo.sendSlackMessage !== undefined &&
|
||||
xo.sendSlackMessage({
|
||||
message: markdown,
|
||||
}),
|
||||
xo.sendPassiveCheck !== undefined &&
|
||||
xo.sendPassiveCheck({
|
||||
status: globalSuccess ? 0 : 2,
|
||||
message: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${tag}`
|
||||
: `[Xen Orchestra] [${
|
||||
nFailures !== 0 ? 'Failure' : 'Skipped'
|
||||
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
|
||||
}),
|
||||
])
|
||||
return this._sendReport({
|
||||
markdown,
|
||||
subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
|
||||
globalSuccess
|
||||
? ICON_SUCCESS
|
||||
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
|
||||
}`,
|
||||
nagiosStatus: globalSuccess ? 0 : 2,
|
||||
nagiosMarkdown: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${tag}`
|
||||
: `[Xen Orchestra] [${
|
||||
nFailures !== 0 ? 'Failure' : 'Skipped'
|
||||
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
|
||||
})
|
||||
}
|
||||
}
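For context, the class above plugs into the server's job lifecycle: `unload` removes a `job:terminated` listener (visible earlier in this diff), and the matching registration presumably happens in `load`. A hedged sketch of that wiring, with the pre-binding and the `on` call assumed rather than taken from the diff:

```
class BackupReportsXoPlugin {
  constructor (xo) {
    this._xo = xo
    // assumption: the handler is pre-bound so the same reference can be removed later
    this._report = this._wrapper.bind(this)
  }

  load () {
    // assumption: mirrors the removeListener call shown in unload
    this._xo.on('job:terminated', this._report)
  }

  unload () {
    this._xo.removeListener('job:terminated', this._report)
  }

  _wrapper () {
    /* dispatches to the listeners shown above */
  }
}
```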
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-perf-alert",
|
||||
"version": "0.0.0",
|
||||
"version": "0.1.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
@@ -26,10 +26,10 @@
|
||||
"lodash": "^4.17.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.44",
|
||||
"@babel/core": "7.0.0-beta.44",
|
||||
"@babel/preset-env": "7.0.0-beta.44",
|
||||
"@babel/preset-flow": "^7.0.0-beta.44",
|
||||
"@babel/cli": "7.0.0-beta.46",
|
||||
"@babel/core": "7.0.0-beta.46",
|
||||
"@babel/preset-env": "7.0.0-beta.46",
|
||||
"@babel/preset-flow": "^7.0.0-beta.46",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
import JSON5 from 'json5'
|
||||
import { createSchedule } from '@xen-orchestra/cron'
|
||||
import { forOwn, map, mean } from 'lodash'
|
||||
import { assign, forOwn, map, mean } from 'lodash'
|
||||
import { utcParse } from 'd3-time-format'
|
||||
|
||||
const VM_FUNCTIONS = {
|
||||
cpuUsage: {
|
||||
name: 'VM CPU usage',
|
||||
description:
|
||||
'Raises an alarm when the average usage of any CPU is higher than the threshold',
|
||||
unit: '%',
|
||||
@@ -31,6 +32,7 @@ const VM_FUNCTIONS = {
|
||||
},
|
||||
},
|
||||
memoryUsage: {
|
||||
name: 'VM memory usage',
|
||||
description:
|
||||
'Raises an alarm when the used memory % is higher than the threshold',
|
||||
unit: '% used',
|
||||
@@ -60,6 +62,7 @@ const VM_FUNCTIONS = {
|
||||
|
||||
const HOST_FUNCTIONS = {
|
||||
cpuUsage: {
|
||||
name: 'host CPU usage',
|
||||
description:
|
||||
'Raises an alarm when the average usage of any CPU is higher than the threshold',
|
||||
unit: '%',
|
||||
@@ -86,6 +89,7 @@ const HOST_FUNCTIONS = {
|
||||
},
|
||||
},
|
||||
memoryUsage: {
|
||||
name: 'host memory usage',
|
||||
description:
|
||||
'Raises an alarm when the used memory % is higher than the threshold',
|
||||
unit: '% used',
|
||||
@@ -105,9 +109,25 @@ const HOST_FUNCTIONS = {
|
||||
)
|
||||
},
|
||||
getDisplayableValue,
|
||||
shouldAlarm: () => {
|
||||
return getDisplayableValue() > threshold
|
||||
},
|
||||
shouldAlarm: () => getDisplayableValue() > threshold,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
const SR_FUNCTIONS = {
|
||||
storageUsage: {
|
||||
name: 'SR storage usage',
|
||||
description:
|
||||
'Raises an alarm when the used disk space % is higher than the threshold',
|
||||
unit: '% used',
|
||||
comparator: '>',
|
||||
createGetter: threshold => sr => {
|
||||
const getDisplayableValue = () =>
|
||||
sr.physical_utilisation * 100 / sr.physical_size
|
||||
return {
|
||||
getDisplayableValue,
|
||||
shouldAlarm: () => getDisplayableValue() > threshold,
|
||||
}
|
||||
},
|
||||
},
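Unlike the CPU and memory monitors, which go through RRD samples, the SR monitor reads its value straight from the XAPI object: `createGetter` closes over the threshold and computes the used-space percentage from `physical_utilisation` and `physical_size`. A quick illustration with a fake SR record:

```
const createGetter = threshold => sr => {
  const getDisplayableValue = () =>
    sr.physical_utilisation * 100 / sr.physical_size
  return {
    getDisplayableValue,
    shouldAlarm: () => getDisplayableValue() > threshold,
  }
}

// Fake SR record: 900 GiB used out of 1 TiB.
const sr = { physical_utilisation: 900 * 1024 ** 3, physical_size: 1024 ** 4 }
const check = createGetter(80)(sr)

console.log(check.getDisplayableValue()) // ≈ 87.9 (%)
console.log(check.shouldAlarm()) // true, since 87.9 > 80
```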
|
||||
@@ -116,6 +136,7 @@ const HOST_FUNCTIONS = {
|
||||
const TYPE_FUNCTION_MAP = {
|
||||
vm: VM_FUNCTIONS,
|
||||
host: HOST_FUNCTIONS,
|
||||
sr: SR_FUNCTIONS,
|
||||
}
|
||||
|
||||
// list of currently ringing alarms, to avoid double notification
|
||||
@@ -229,11 +250,52 @@ export const configurationSchema = {
|
||||
required: ['uuids'],
|
||||
},
|
||||
},
|
||||
srMonitors: {
|
||||
type: 'array',
|
||||
title: 'SR Monitors',
|
||||
description:
|
||||
'Alarms checking all SRs on all pools. The selected performance counter is sampled regularly and averaged. ' +
|
||||
'The average is compared to the threshold and an alarm is raised upon crossing',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
uuids: {
|
||||
title: 'SRs',
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'string',
|
||||
$type: 'SR',
|
||||
},
|
||||
},
|
||||
variableName: {
|
||||
title: 'Alarm Type',
|
||||
description: Object.keys(SR_FUNCTIONS)
|
||||
.map(
|
||||
k =>
|
||||
` * ${k} (${SR_FUNCTIONS[k].unit}): ${
|
||||
SR_FUNCTIONS[k].description
|
||||
}`
|
||||
)
|
||||
.join('\n'),
|
||||
type: 'string',
|
||||
default: Object.keys(SR_FUNCTIONS)[0],
|
||||
enum: Object.keys(SR_FUNCTIONS),
|
||||
},
|
||||
alarmTriggerLevel: {
|
||||
title: 'Threshold',
|
||||
description:
|
||||
'The direction of the crossing is given by the Alarm type',
|
||||
type: 'number',
|
||||
default: 80,
|
||||
},
|
||||
},
|
||||
required: ['uuids'],
|
||||
},
|
||||
},
|
||||
toEmails: {
|
||||
type: 'array',
|
||||
title: 'Email addresses',
|
||||
description: 'Email addresses of the alert recipients',
|
||||
|
||||
items: {
|
||||
type: 'string',
|
||||
},
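Put together, a plugin configuration exercising the new `srMonitors` entry might look like the following (the UUID and address are placeholders, not values from this diff):

```
const configuration = {
  srMonitors: [
    {
      uuids: ['357bd56c-0000-0000-0000-000000000000'], // placeholder SR UUID
      variableName: 'storageUsage',
      alarmTriggerLevel: 80, // raise when more than 80 % of the SR is used
    },
  ],
  toEmails: ['admin@example.org'],
}
```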
|
||||
@@ -259,13 +321,11 @@ const raiseOrLowerAlarm = (
|
||||
currentAlarms[alarmId] = true
|
||||
raiseCallback(alarmId)
|
||||
}
|
||||
} else {
|
||||
if (current) {
|
||||
try {
|
||||
lowerCallback(alarmId)
|
||||
} finally {
|
||||
delete currentAlarms[alarmId]
|
||||
}
|
||||
} else if (current) {
|
||||
try {
|
||||
lowerCallback(alarmId)
|
||||
} finally {
|
||||
delete currentAlarms[alarmId]
|
||||
}
|
||||
}
|
||||
}
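The simplification above collapses a nested `else { if (current) … }` into a single `else if (current)`. For readers skimming the diff, the helper boils down to the following sketch (parameter names are guesses based on the visible fragment): an alarm is only raised when it is not already ringing, and only lowered when it is.

```
const currentAlarms = {}

const raiseOrLowerAlarm = (alarmId, shouldRing, raiseCallback, lowerCallback) => {
  const current = currentAlarms[alarmId]
  if (shouldRing) {
    if (!current) {
      currentAlarms[alarmId] = true
      raiseCallback(alarmId) // notified once, not on every check
    }
  } else if (current) {
    try {
      lowerCallback(alarmId)
    } finally {
      delete currentAlarms[alarmId] // forget the alarm even if lowering failed
    }
  }
}
```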
|
||||
@@ -297,24 +357,38 @@ class PerfAlertXoPlugin {
|
||||
clearCurrentAlarms()
|
||||
}
|
||||
|
||||
load () {
|
||||
this._job.start()
|
||||
}
|
||||
|
||||
unload () {
|
||||
this._job.stop()
|
||||
}
|
||||
|
||||
_generateUrl (type, object) {
|
||||
const map = {
|
||||
vm: () => `${this._configuration.baseUrl}#/vms/${object.uuid}/stats`,
|
||||
host: () => `${this._configuration.baseUrl}#/hosts/${object.uuid}/stats`,
|
||||
const { baseUrl } = this._configuration
|
||||
const { uuid } = object
|
||||
switch (type) {
|
||||
case 'vm':
|
||||
return `${baseUrl}#/vms/${uuid}/stats`
|
||||
case 'host':
|
||||
return `${baseUrl}#/hosts/${uuid}/stats`
|
||||
case 'sr':
|
||||
return `${baseUrl}#/srs/${uuid}/general`
|
||||
default:
|
||||
return 'unknown type'
|
||||
}
|
||||
return map[type]()
|
||||
}
|
||||
|
||||
async test () {
|
||||
const hostMonitorPart2 = await Promise.all(
|
||||
map(this._getMonitors(), async m => {
|
||||
const tableBody = (await m.snapshot()).map(entry => entry.tableItem)
|
||||
return `
|
||||
const monitorBodies = await Promise.all(
|
||||
map(
|
||||
this._getMonitors(),
|
||||
async m => `
|
||||
## Monitor for ${m.title}
|
||||
|
||||
${m.tableHeader}
|
||||
${tableBody.join('')}`
|
||||
})
|
||||
${(await m.snapshot()).map(entry => entry.listItem).join('')}`
|
||||
)
|
||||
)
|
||||
|
||||
this._sendAlertEmail(
|
||||
@@ -322,18 +396,10 @@ ${tableBody.join('')}`
|
||||
`
|
||||
# Performance Alert Test
|
||||
Your alarms and their current status:
|
||||
${hostMonitorPart2.join('\n')}`
|
||||
${monitorBodies.join('\n')}`
|
||||
)
|
||||
}
|
||||
|
||||
load () {
|
||||
this._job.start()
|
||||
}
|
||||
|
||||
unload () {
|
||||
this._job.stop()
|
||||
}
|
||||
|
||||
_parseDefinition (definition) {
|
||||
const alarmId = `${definition.objectType}|${definition.variableName}|${
|
||||
definition.alarmTriggerLevel
|
||||
@@ -384,63 +450,67 @@ ${hostMonitorPart2.join('\n')}`
|
||||
definition.alarmTriggerPeriod !== undefined
|
||||
? definition.alarmTriggerPeriod
|
||||
: 60
|
||||
const typeText = definition.objectType === 'host' ? 'Host' : 'VM'
|
||||
return {
|
||||
...definition,
|
||||
alarmId,
|
||||
vmFunction: typeFunction,
|
||||
title: `${typeText} ${definition.variableName} ${
|
||||
typeFunction.comparator
|
||||
} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
|
||||
tableHeader: `${typeText} | Value | Alert\n--- | -----:| ---:`,
|
||||
title: `${typeFunction.name} ${typeFunction.comparator} ${
|
||||
definition.alarmTriggerLevel
|
||||
}${typeFunction.unit}`,
|
||||
snapshot: async () => {
|
||||
return Promise.all(
|
||||
map(definition.uuids, async uuid => {
|
||||
try {
|
||||
const monitoredObject = this._xo.getXapi(uuid).getObject(uuid)
|
||||
const objectLink = `[${
|
||||
monitoredObject.name_label
|
||||
}](${this._generateUrl(definition.objectType, monitoredObject)})`
|
||||
const rrd = await this.getRrd(monitoredObject, observationPeriod)
|
||||
const couldFindRRD = rrd !== null
|
||||
const result = {
|
||||
object: monitoredObject,
|
||||
couldFindRRD,
|
||||
objectLink: objectLink,
|
||||
listItem: ` * ${typeText} ${objectLink} ${
|
||||
definition.variableName
|
||||
}: **Can't read performance counters**\n`,
|
||||
tableItem: `${objectLink} | - | **Can't read performance counters**\n`,
|
||||
uuid,
|
||||
name: definition.name,
|
||||
object: this._xo.getXapi(uuid).getObject(uuid),
|
||||
}
|
||||
if (!couldFindRRD) {
|
||||
return result
|
||||
|
||||
if (result.object === undefined) {
|
||||
throw new Error('object not found')
|
||||
}
|
||||
const data = parseData(rrd, monitoredObject.uuid)
|
||||
const textValue =
|
||||
data.getDisplayableValue().toFixed(1) + typeFunction.unit
|
||||
const shouldAlarm = data.shouldAlarm()
|
||||
return {
|
||||
...result,
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: shouldAlarm,
|
||||
textValue: textValue,
|
||||
listItem: ` * ${typeText} ${objectLink} ${
|
||||
definition.variableName
|
||||
}: ${textValue}\n`,
|
||||
tableItem: `${objectLink} | ${textValue} | ${
|
||||
shouldAlarm ? '**Alert Ongoing**' : 'no alert'
|
||||
}\n`,
|
||||
|
||||
result.objectLink = `[${
|
||||
result.object.name_label
|
||||
}](${this._generateUrl(definition.objectType, result.object)})`
|
||||
|
||||
if (typeFunction.createGetter === undefined) {
|
||||
// Stats via RRD
|
||||
result.rrd = await this.getRrd(result.object, observationPeriod)
|
||||
if (result.rrd !== null) {
|
||||
const data = parseData(result.rrd, result.object.uuid)
|
||||
assign(result, {
|
||||
data,
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: data.shouldAlarm(),
|
||||
})
|
||||
}
|
||||
} else {
|
||||
// Stats via XAPI
|
||||
const getter = typeFunction.createGetter(
|
||||
definition.alarmTriggerLevel
|
||||
)
|
||||
const data = getter(result.object)
|
||||
assign(result, {
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: data.shouldAlarm(),
|
||||
})
|
||||
}
|
||||
|
||||
result.listItem = ` * ${result.objectLink}: ${
|
||||
result.value === undefined
|
||||
? "**Can't read performance counters**"
|
||||
: result.value.toFixed(1) + typeFunction.unit
|
||||
}\n`
|
||||
|
||||
return result
|
||||
} catch (_) {
|
||||
return {
|
||||
uuid,
|
||||
object: null,
|
||||
couldFindRRD: false,
|
||||
objectLink: `cannot find object ${uuid}`,
|
||||
listItem: ` * ${typeText} ${uuid} ${
|
||||
definition.variableName
|
||||
}: **Can't read performance counters**\n`,
|
||||
tableItem: `object ${uuid} | - | **Can't read performance counters**\n`,
|
||||
listItem: ` * ${uuid}: **Can't read performance counters**\n`,
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -452,11 +522,17 @@ ${hostMonitorPart2.join('\n')}`
|
||||
_getMonitors () {
|
||||
return map(this._configuration.hostMonitors, def =>
|
||||
this._parseDefinition({ ...def, objectType: 'host' })
|
||||
).concat(
|
||||
map(this._configuration.vmMonitors, def =>
|
||||
this._parseDefinition({ ...def, objectType: 'vm' })
|
||||
)
|
||||
)
|
||||
.concat(
|
||||
map(this._configuration.vmMonitors, def =>
|
||||
this._parseDefinition({ ...def, objectType: 'vm' })
|
||||
)
|
||||
)
|
||||
.concat(
|
||||
map(this._configuration.srMonitors, def =>
|
||||
this._parseDefinition({ ...def, objectType: 'sr' })
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
async _checkMonitors () {
|
||||
@@ -466,7 +542,7 @@ ${hostMonitorPart2.join('\n')}`
|
||||
for (const entry of snapshot) {
|
||||
raiseOrLowerAlarm(
|
||||
`${monitor.alarmId}|${entry.uuid}|RRD`,
|
||||
!entry.couldFindRRD,
|
||||
entry.value === undefined,
|
||||
() => {
|
||||
this._sendAlertEmail(
|
||||
'Secondary Issue',
|
||||
@@ -477,9 +553,11 @@ ${entry.listItem}`
|
||||
},
|
||||
() => {}
|
||||
)
|
||||
if (!entry.couldFindRRD) {
|
||||
|
||||
if (entry.value === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const raiseAlarm = alarmId => {
|
||||
// sample XenCenter message:
|
||||
// value: 1.242087 config: <variable> <name value="mem_usage"/> </variable>
|
||||
@@ -500,23 +578,24 @@ ${entry.listItem}`
|
||||
this._sendAlertEmail(
|
||||
'',
|
||||
`
|
||||
## ALERT ${monitor.title}
|
||||
## ALERT: ${monitor.title}
|
||||
${entry.listItem}
|
||||
### Description
|
||||
${monitor.vmFunction.description}`
|
||||
)
|
||||
}
|
||||
|
||||
const lowerAlarm = alarmId => {
|
||||
console.log('lowering Alarm', alarmId)
|
||||
this._sendAlertEmail(
|
||||
'END OF ALERT',
|
||||
`
|
||||
## END OF ALERT ${monitor.title}
|
||||
## END OF ALERT: ${monitor.title}
|
||||
${entry.listItem}
|
||||
### Description
|
||||
${monitor.vmFunction.description}`
|
||||
)
|
||||
}
|
||||
|
||||
raiseOrLowerAlarm(
|
||||
`${monitor.alarmId}|${entry.uuid}`,
|
||||
entry.shouldAlarm,
|
||||
|
||||
3
packages/xo-server-rework/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
24
packages/xo-server-rework/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
51
packages/xo-server-rework/README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# xo-server [](https://travis-ci.org/vatesfr/xo-server)
|
||||
|
||||
> Server part of [Xen Orchestra](https://xen-orchestra.com)
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/xo-server):
|
||||
|
||||
```
|
||||
> npm install --global xo-server
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
> xo-server
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcome, either on the documentation or on
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xo-web/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
AGPL3 © [Vates SAS](http://vates.fr)
|
||||
40
packages/xo-server-rework/config.json
Normal file
@@ -0,0 +1,40 @@
|
||||
// Vendor config: DO NOT TOUCH!
|
||||
//
|
||||
// See sample.config.yaml to override.
|
||||
{
|
||||
|
||||
// Should users be created on first sign in?
|
||||
//
|
||||
// Necessary for external authentication providers.
|
||||
"createUserOnFirstSignin": true,
|
||||
|
||||
"datadir": "/var/lib/xo-server/data",
|
||||
|
||||
"http": {
|
||||
"listen": [
|
||||
{
|
||||
"port": 80
|
||||
}
|
||||
],
|
||||
|
||||
"mounts": {},
|
||||
|
||||
// Ciphers to use.
|
||||
//
|
||||
// These are the default ciphers in Node 4.2.6, we are setting
|
||||
// them explicitly for older Node versions.
|
||||
"ciphers": "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:DHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA256:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!SRP:!CAMELLIA",
|
||||
|
||||
// Tell Node to respect the cipher order.
|
||||
"honorCipherOrder": true,
|
||||
|
||||
// Specify to use at least TLSv1.1.
|
||||
// See: https://github.com/certsimple/minimum-tls-version
|
||||
"secureOptions": 117440512
|
||||
},
|
||||
|
||||
"jwt": {
|
||||
"expiresIn": "7d",
|
||||
"secret": "P],7x#cRhuy,wCR'$}'N?<2yOQ3v6.!b*|1B2P36(wKsYICH|6"
|
||||
}
|
||||
}
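The `secureOptions` value is not arbitrary: 117440512 is `0x07000000`, i.e. the OpenSSL flags disabling SSLv2, SSLv3 and TLSv1 OR'd together, leaving TLSv1.1 and newer. The flag values below are the long-standing OpenSSL 1.0 ones, shown here only to explain where the number comes from:

```
// OpenSSL option flags (OpenSSL 1.0.x values)
const SSL_OP_NO_SSLv2 = 0x01000000
const SSL_OP_NO_SSLv3 = 0x02000000
const SSL_OP_NO_TLSv1 = 0x04000000

// 16777216 + 33554432 + 67108864 === 117440512
console.log(SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1)
```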
|
||||
1047
packages/xo-server-rework/flow-typed/npm/lodash_v4.x.x.js
vendored
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff