Compare commits
262 Commits
xo-server- ... visualizat
Commit SHA1s:

15e96e0921, 9644cabbac, c4eeb2b77e, ee1e9ba7b7, 3655117a13, cc2c71c076, 9ca273b2c4, b85c2f35b6,
fdd79885f9, b2eb970796, 3ee9c1b550, 2566c24753, 49e1b0ba7e, 453c329f14, 27193f38f3, d3dc94e210,
6dad860635, 0362ac8909, e7b79f83d1, 62379c1e41, 23b422e3df, f8e6dee635, c8e9b287f4, c9412dbcd0,
77222e9e6b, 9d0f24eae1, 6e527947be, e7051c1129, 3196c7ca09, 0e1e32d241, a34912fb0d, c7c6e0e2ff,
1e529c995a, 7be1c7a47b, b17380443b, 59e68682bd, b7a92cfe92, 5ebe27da49, 42df6ba6fa, 8210fddfab,
f55ed273c5, d67e95af7b, 0b0f235252, 36a5f52068, 31266728f7, 8c79ea4ce3, c73a4204cb, 0b3c2cc252,
2bd3ca1d0b, ce8649d991, 9bd563b111, 6ceb924a85, c2ef0ded43, 6081a6f6db, a0d92a0b1d, 3cf1f7ede2,
5757afa1d8, 86e9b9c1b8, 1cdd1fa00e, 9d12759c68, 594341fab6, 4e88125cbe, 13237180a2, f64d7e0b6e,
040a6930a4, c54b9189a6, 8882f1b019, ae6416c4d2, 8faed87656, 0983f05969, d43e2544a1, ca83d11ac8,
1cdcdd9b5f, cc7806e35b, 0ee48b6623, 8c02e0efbd, 34d3ca82bc, 43822d3667, f4ac73b3b4, f084b6def9,
a00d101ff7, 9d5900d9b6, 28fb4e8216, bec4dbe652, 72cc14f508, d20941cc2c, 9cb8a05316, dccd799f6d,
b42b3d1b01, a40d6f772a, 6e9bfd18d9, 3b92dd0139, 564d53610a, b4c7b8ac7f, 7acd90307b, d3ec76c19f,
688cb20674, c63be20bea, df36633223, 3597621d88, 8387684839, f261f395f1, f27170ff0e, d82c951db6,
41ca853e03, a08d098265, 875681b8ce, a03dcbbf55, 97cabbbc69, 13725a9e21, f47df961f7, 2f644d5eeb,
4b292bb78c, 804891cc81, d335e06371, 477058ad23, eb3b68401d, 865d2df124, 88160bae1d, f581e93b88,
21a7cf7158, 5edee4bae0, 916ca5576a, 6c861bfd1f, 56961b55bd, cdcd7154ba, 654a2ee870, 903634073a,
0d4818feb6, d6aa40679b, b7cc31c94d, 6860156b6f, 29486c9ce2, 7cfa6a5da4, 2563be472b, 7289e856d9,
975de1954e, 95bcf0c080, f900a5ef4f, 7f1ab529ae, 49fc86e4b1, 924aef84f1, 96e6e2b72a, 71997d4e65,
447f2f9506, 79aef9024b, fdf6f4fdf3, 4d1eaaaade, bdad6c0f6d, ff1ca5d933, 2cf4c494a4, 95ac0a861a,
746c301f39, 6455b12b58, 485b8fe993, d7527f280c, d57fa4375d, d9e42c6625, 28293d3fce, d505401446,
fafc24aeae, f78ef0d208, 8384cc3652, 60aa18a229, 3d64b42a89, b301997d4b, ab34743250, bc14a1d167,
2886ec116f, c2beb2a5fa, d6ac10f527, 9dcd8a707a, e1e97ef158, 5d6b37f81a, e1da08ba38, 1dfb50fefd,
5c06ebc9c8, 52a9270fb0, 82247d7422, b34688043f, ce4bcbd19d, cde9a02c32, fe1da4ea12, a73306817b,
54e683d3d4, f49910ca82, 4052f7f736, b47e097983, e44dbfb2a4, 7d69dd9400, e6aae8fcfa, da800b3391,
3a574bcecc, 1bb0e234e7, b7e14ebf2a, 2af1207702, ecfed30e6e, d06c3e3dd8, 16b3fbeb16, 0938804947,
851bcf9816, 9f6fc785bc, 56636bf5d4, 3899a65167, 628e53c1c3, 9fa424dd8d, 3e6f2eecfa, cc655c8ba8,
78aa0474ee, 9caefa2f49, 478726fa3b, f64917ec52, 2bc25f91c4, 623d7ffe2f, 07510b5099, 9f21f9a7bc,
93da70709e, 00436e744a, 1e642fc512, 6baef2450c, 600f34f85a, 6c0c6bc5c4, fcd62ed3cd, 785f2e3a6d,
c2925f7c1e, 60814d8b58, 2dec448f2c, b71f4f6800, 558083a916, d507ed9dff, 7ed0242662, d7b3d989d7,
707b2f77f0, 5ddbb76979, 97b0fe62d4, 8ac9b2cdc7, bc4c1a13e6, d3ec303ade, 6cfc2a1ba6, e15cadc863,
2f9284c263, 2465852fd6, a9f48a0d50, 4ed0035c67, b66f2dfb80, 3cb155b129, df7efc04e2, a21a8457a4,
020955f535, 51f23a5f03, d024319441, f8f35938c0, 2573ace368, 6bf7269814, 6695c7bf5e, 44a83fd817,
08ddfe0649, 5ba170bf1f, 8150d3110c, 312b33ae85, 008eb995ed, 6d8848043c
.eslintrc.js (13 changes)

```diff
@@ -1,5 +1,7 @@
 module.exports = {
   extends: [
+    'plugin:eslint-comments/recommended',
+
     'standard',
     'standard-jsx',
     'prettier',
@@ -16,6 +18,16 @@ module.exports = {
    $PropertyType: true,
    $Shape: true,
  },
+
+  overrides: [
+    {
+      files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
+      rules: {
+        'no-console': 'off',
+      },
+    },
+  ],
+
  parser: 'babel-eslint',
  parserOptions: {
    ecmaFeatures: {
@@ -23,6 +35,7 @@ module.exports = {
    },
  },
  rules: {
+    'no-console': ['error', { allow: ['warn', 'error'] }],
    'no-var': 'error',
    'node/no-extraneous-import': 'error',
    'node/no-extraneous-require': 'error',
```

```diff
@@ -46,6 +46,7 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepare": "yarn run build",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
```diff
@@ -23,14 +23,20 @@ const configs = {
 
     shippedProposals: true,
     targets: (() => {
+      const targets = {}
+      const browsers = pkg.browserslist
+      if (browsers !== undefined) {
+        targets.browsers = browsers
+      }
       let node = (pkg.engines || {}).node
       if (node !== undefined) {
         const trimChars = '^=>~'
         while (trimChars.includes(node[0])) {
           node = node.slice(1)
         }
+        targets.node = node
       }
-      return { browsers: pkg.browserslist, node }
+      return targets
     })(),
     useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
   }
@@ -46,6 +52,12 @@ const getConfig = (key, ...args) => {
     : config
 }
 
+// some plugins must be used in a specific order
+const pluginsOrder = [
+  '@babel/plugin-proposal-decorators',
+  '@babel/plugin-proposal-class-properties',
+]
+
 module.exports = function(pkg, plugins, presets) {
   plugins === undefined && (plugins = {})
   presets === undefined && (presets = {})
@@ -61,7 +73,13 @@ module.exports = function(pkg, plugins, presets) {
   return {
     comments: !__PROD__,
     ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-    plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
+    plugins: Object.keys(plugins)
+      .map(plugin => [plugin, plugins[plugin]])
+      .sort(([a], [b]) => {
+        const oA = pluginsOrder.indexOf(a)
+        const oB = pluginsOrder.indexOf(b)
+        return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
+      }),
     presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
   }
 }
```
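The sort added above matters because Babel requires `@babel/plugin-proposal-decorators` to be applied before `@babel/plugin-proposal-class-properties`, while a purely alphabetical order would put them the other way around. A standalone sketch (not repo code; the real comparator receives `[plugin, options]` pairs and destructures them) of how the comparator behaves:

```js
// Plugins listed in pluginsOrder compare by their index there; any other
// pair falls back to plain name ordering.
const pluginsOrder = [
  '@babel/plugin-proposal-decorators',
  '@babel/plugin-proposal-class-properties',
]

const sorted = [
  '@babel/plugin-proposal-class-properties',
  '@babel/plugin-proposal-decorators',
  'babel-plugin-lodash',
].sort((a, b) => {
  const oA = pluginsOrder.indexOf(a)
  const oB = pluginsOrder.indexOf(b)
  return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
})

console.log(sorted)
// [ '@babel/plugin-proposal-decorators',
//   '@babel/plugin-proposal-class-properties',
//   'babel-plugin-lodash' ]
```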
```diff
@@ -16,6 +16,9 @@
   },
   "dependencies": {
     "golike-defer": "^0.4.1",
-    "xen-api": "^0.24.5"
+    "xen-api": "^0.25.1"
   },
+  "scripts": {
+    "postversion": "npm publish"
+  }
 }
```
```diff
@@ -55,6 +55,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run clean",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```

```diff
@@ -43,6 +43,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```

```diff
@@ -42,6 +42,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/fs",
-  "version": "0.7.1",
+  "version": "0.9.0",
   "license": "AGPL-3.0",
   "description": "The File System for Xen Orchestra backups.",
   "keywords": [],
@@ -21,18 +21,18 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@marsaud/smb2": "^0.13.0",
+    "@marsaud/smb2": "^0.14.0",
     "@sindresorhus/df": "^2.1.0",
     "@xen-orchestra/async-map": "^0.0.0",
-    "decorator-synchronized": "^0.3.0",
+    "decorator-synchronized": "^0.5.0",
     "execa": "^1.0.0",
-    "fs-extra": "^7.0.0",
+    "fs-extra": "^8.0.1",
     "get-stream": "^4.0.0",
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.11.0",
+    "promise-toolbox": "^0.12.1",
     "readable-stream": "^3.0.6",
     "through2": "^3.0.0",
-    "tmp": "^0.0.33",
+    "tmp": "^0.1.0",
     "xo-remote-parser": "^0.5.0"
   },
   "devDependencies": {
@@ -45,7 +45,7 @@
     "async-iterator-to-stream": "^1.1.0",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.3",
-    "dotenv": "^6.1.0",
+    "dotenv": "^8.0.0",
     "index-modules": "^0.3.0",
     "rimraf": "^2.6.2"
   },
@@ -55,6 +55,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run clean",
-    "prepare": "yarn run build"
+    "prepare": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
```diff
@@ -25,6 +25,10 @@ type RemoteInfo = { used?: number, size?: number }
 type File = FileDescriptor | string
 
 const checksumFile = file => file + '.checksum'
+const computeRate = (hrtime: number[], size: number) => {
+  const seconds = hrtime[0] + hrtime[1] / 1e9
+  return size / seconds
+}
 
 const DEFAULT_TIMEOUT = 6e5 // 10 min
 
@@ -362,18 +366,27 @@ export default class RemoteHandlerAbstract {
   }
 
   async test(): Promise<Object> {
+    const SIZE = 1024 * 1024 * 10
     const testFileName = normalizePath(`${Date.now()}.test`)
-    const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
+    const data = await fromCallback(cb => randomBytes(SIZE, cb))
     let step = 'write'
     try {
+      const writeStart = process.hrtime()
       await this._outputFile(testFileName, data, { flags: 'wx' })
+      const writeDuration = process.hrtime(writeStart)
+
       step = 'read'
+      const readStart = process.hrtime()
       const read = await this._readFile(testFileName, { flags: 'r' })
+      const readDuration = process.hrtime(readStart)
+
       if (!data.equals(read)) {
         throw new Error('output and input did not match')
       }
       return {
         success: true,
+        writeRate: computeRate(writeDuration, SIZE),
+        readRate: computeRate(readDuration, SIZE),
       }
     } catch (error) {
       return {
```
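For context on the numbers above: `process.hrtime(start)` returns a `[seconds, nanoseconds]` delta, so `computeRate` is just bytes divided by elapsed seconds. A quick sanity check of the helper:

```js
// 10 MiB moved in 2.5 s (2 s + 5e8 ns) is 4 MiB/s.
const computeRate = (hrtime, size) => {
  const seconds = hrtime[0] + hrtime[1] / 1e9
  return size / seconds
}

console.log(computeRate([2, 5e8], 10 * 1024 * 1024)) // 4194304 (bytes per second)
```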
```diff
@@ -387,6 +400,10 @@ export default class RemoteHandlerAbstract {
     }
   }
 
+  async truncate(file: string, len: number): Promise<void> {
+    await this._truncate(file, len)
+  }
+
   async unlink(file: string, { checksum = true }: Object = {}): Promise<void> {
     file = normalizePath(file)
 
@@ -397,6 +414,18 @@ export default class RemoteHandlerAbstract {
     await this._unlink(file).catch(ignoreEnoent)
   }
 
+  async write(
+    file: File,
+    buffer: Buffer,
+    position: number
+  ): Promise<{| bytesWritten: number, buffer: Buffer |}> {
+    await this._write(
+      typeof file === 'string' ? normalizePath(file) : file,
+      buffer,
+      position
+    )
+  }
+
   async writeFile(
     file: string,
     data: Data,
@@ -533,6 +562,28 @@ export default class RemoteHandlerAbstract {
     throw new Error('Not implemented')
   }
 
+  async _write(file: File, buffer: Buffer, position: number): Promise<void> {
+    const isPath = typeof file === 'string'
+    if (isPath) {
+      file = await this.openFile(file, 'r+')
+    }
+    try {
+      return await this._writeFd(file, buffer, position)
+    } finally {
+      if (isPath) {
+        await this.closeFile(file)
+      }
+    }
+  }
+
+  async _writeFd(
+    fd: FileDescriptor,
+    buffer: Buffer,
+    position: number
+  ): Promise<void> {
+    throw new Error('Not implemented')
+  }
+
   async _writeFile(
     file: string,
     data: Data,
```
```diff
@@ -3,9 +3,9 @@
 import 'dotenv/config'
 import asyncIteratorToStream from 'async-iterator-to-stream'
 import getStream from 'get-stream'
+import { forOwn, random } from 'lodash'
 import { fromCallback } from 'promise-toolbox'
 import { pipeline } from 'readable-stream'
-import { random } from 'lodash'
 import { tmpdir } from 'os'
 
 import { getHandler } from '.'
@@ -290,9 +290,11 @@ handlers.forEach(url => {
 
   describe('#test()', () => {
     it('tests the remote appears to be working', async () => {
-      expect(await handler.test()).toEqual({
-        success: true,
-      })
+      const answer = await handler.test()
+
+      expect(answer.success).toBe(true)
+      expect(typeof answer.writeRate).toBe('number')
+      expect(typeof answer.readRate).toBe('number')
     })
   })
 
@@ -308,5 +310,70 @@ handlers.forEach(url => {
       await handler.unlink('file')
     })
   })
 
+  describe('#write()', () => {
+    beforeEach(() => handler.outputFile('file', TEST_DATA))
+
+    const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
+    const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)
+
+    forOwn(
+      {
+        'dont increase file size': (() => {
+          const offset = random(0, TEST_DATA_LEN - PATCH_DATA_LEN)
+
+          const expected = Buffer.from(TEST_DATA)
+          PATCH_DATA.copy(expected, offset)
+
+          return { offset, expected }
+        })(),
+        'increase file size': (() => {
+          const offset = random(
+            TEST_DATA_LEN - PATCH_DATA_LEN + 1,
+            TEST_DATA_LEN
+          )
+
+          const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
+          TEST_DATA.copy(expected)
+          PATCH_DATA.copy(expected, offset)
+
+          return { offset, expected }
+        })(),
+      },
+      ({ offset, expected }, title) => {
+        describe(title, () => {
+          testWithFileDescriptor('file', 'r+', async ({ file }) => {
+            await handler.write(file, PATCH_DATA, offset)
+            await expect(await handler.readFile('file')).toEqual(expected)
+          })
+        })
+      }
+    )
+  })
+
+  describe('#truncate()', () => {
+    forOwn(
+      {
+        'shrinks file': (() => {
+          const length = random(0, TEST_DATA_LEN)
+          const expected = TEST_DATA.slice(0, length)
+          return { length, expected }
+        })(),
+        'grows file': (() => {
+          const length = random(TEST_DATA_LEN, TEST_DATA_LEN * 2)
+          const expected = Buffer.alloc(length)
+          TEST_DATA.copy(expected)
+          return { length, expected }
+        })(),
+      },
+      ({ length, expected }, title) => {
+        it(title, async () => {
+          await handler.outputFile('file', TEST_DATA)
+          await handler.truncate('file', length)
+          await expect(await handler.readFile('file')).toEqual(expected)
+        })
+      }
+    )
+  })
 })
 })
```
```diff
@@ -106,10 +106,18 @@ export default class LocalHandler extends RemoteHandlerAbstract {
     await fs.access(path, fs.R_OK | fs.W_OK)
   }
 
+  _truncate(file, len) {
+    return fs.truncate(this._getFilePath(file), len)
+  }
+
   async _unlink(file) {
     return fs.unlink(this._getFilePath(file))
   }
 
+  _writeFd(file, buffer, position) {
+    return fs.write(file.fd, buffer, 0, buffer.length, position)
+  }
+
   _writeFile(file, data, { flags }) {
     return fs.writeFile(this._getFilePath(file), data, { flag: flags })
   }
```
```diff
@@ -155,10 +155,20 @@ export default class SmbHandler extends RemoteHandlerAbstract {
     return this.list('.')
   }
 
+  _truncate(file, len) {
+    return this._client
+      .truncate(this._getFilePath(file), len)
+      .catch(normalizeError)
+  }
+
   _unlink(file) {
     return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
   }
 
+  _writeFd(file, buffer, position) {
+    return this._client.write(file.fd, buffer, 0, buffer.length, position)
+  }
+
   _writeFile(file, data, options) {
     return this._client
       .writeFile(this._getFilePath(file), data, options)
```
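Between the abstract class and these two implementations, the new `write()` entry point accepts either a path or an open descriptor. A hypothetical usage sketch (file name and offsets invented) under that reading of the API:

```js
// `handler` is any concrete RemoteHandler instance.
async function patchTwice(handler) {
  const patch = Buffer.from('new bytes')

  // Path form: _write() opens the file with 'r+', delegates to the
  // subclass's _writeFd(), then closes the file again.
  await handler.write('dir/file.vhd', patch, 1024)

  // Descriptor form: the caller owns the descriptor's lifetime.
  const fd = await handler.openFile('dir/file.vhd', 'r+')
  try {
    await handler.write(fd, patch, 2048)
  } finally {
    await handler.closeFile(fd)
  }
}
```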
```diff
@@ -27,11 +27,11 @@
     ">2%"
   ],
   "engines": {
-    "node": ">=4"
+    "node": ">=6"
   },
   "dependencies": {
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.11.0"
+    "promise-toolbox": "^0.12.1"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -48,6 +48,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```

```diff
@@ -45,6 +45,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
CHANGELOG.md (138 changes)

```diff
@@ -1,5 +1,143 @@
 # ChangeLog
 
+## **next** (2019-05-14)
+
+### Enhancements
+
+### Bug fixes
+
+- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
+- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
+- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))
+
+### Released packages
+
+- xo-server-backup-reports v0.16.1
+- @xen-orchestra/fs v0.9.0
+- vhd-lib v0.7.0
+- xo-server v5.41.0
+- xo-web v5.41.0
+
+## **5.34.0** (2019-04-30)
+
+### Highlights
+
+- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
+- [Metadata backup] Detailed logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
+- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
+- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
+- Unlock basic stats on all editions [#4166](https://github.com/vatesfr/xen-orchestra/issues/4166) (PR [#4172](https://github.com/vatesfr/xen-orchestra/pull/4172))
+
+### Enhancements
+
+- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
+- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
+- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
+- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
+- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
+- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
+- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
+- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
+- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
+- [XO] Add banner for sources users to clarify support conditions [#4165](https://github.com/vatesfr/xen-orchestra/issues/4165) (PR [#4167](https://github.com/vatesfr/xen-orchestra/pull/4167))
+
+### Bug fixes
+
+- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
+- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
+- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
+- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068)
+- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
+- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))
+
+### Released packages
+
+- xo-server-backup-reports v0.16.0
+- complex-matcher v0.6.0
+- xo-vmdk-to-vhd v0.1.7
+- vhd-lib v0.6.1
+- xo-server v5.40.0
+- xo-web v5.40.1
+
+## **5.33.1** (2019-04-04)
+
+### Bug fix
+
+- Fix major memory leak [2563be4](https://github.com/vatesfr/xen-orchestra/commit/2563be472bfd84c6ed867efd21c4aeeb824d387f)
+
+### Released packages
+
+- xen-api v0.25.1
+- xo-server v5.38.2
+
+## **5.33.0** (2019-03-29)
+
+### Enhancements
+
+- [SR/Disk] Disable actions on unmanaged VDIs [#3988](https://github.com/vatesfr/xen-orchestra/issues/3988) (PR [#4000](https://github.com/vatesfr/xen-orchestra/pull/4000))
+- [Pool] Specify automatic networks on a Pool [#3916](https://github.com/vatesfr/xen-orchestra/issues/3916) (PR [#3958](https://github.com/vatesfr/xen-orchestra/pull/3958))
+- [VM/advanced] Manage start delay for VM [#3909](https://github.com/vatesfr/xen-orchestra/issues/3909) (PR [#4002](https://github.com/vatesfr/xen-orchestra/pull/4002))
+- [New/Vm] SR section: Display warning message when the selected SRs aren't in the same host [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3967](https://github.com/vatesfr/xen-orchestra/pull/3967))
+- Enable compression for HTTP requests (and initial objects fetch)
+- [VDI migration] Display same-pool SRs first in the selector [#3945](https://github.com/vatesfr/xen-orchestra/issues/3945) (PR [#3996](https://github.com/vatesfr/xen-orchestra/pull/3996))
+- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
+- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
+- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
+- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
+- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
+- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
+- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
+- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
+- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
+- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
+- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
+- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
+- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
+- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
+- Improve connection to XCP-ng/XenServer hosts:
+  - never disconnect by itself even in case of errors
+  - never stop watching events
+
+### Bug fixes
+
+- [New network] PIF was wrongly required which prevented from creating a private network (PR [#4010](https://github.com/vatesfr/xen-orchestra/pull/4010))
+- [Google authentication] Migrate to new endpoint
+- [Backup NG] Better handling of huge logs [#4025](https://github.com/vatesfr/xen-orchestra/issues/4025) (PR [#4026](https://github.com/vatesfr/xen-orchestra/pull/4026))
+- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
+- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
+- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
+- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
+- [Remotes] Fixes `spawn mount EMFILE` error during backup
+- Properly redirect to sign in page instead of being stuck in a refresh loop
+- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
+- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
+- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
+  - Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
+  - Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
+  - Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
+  - XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
+
+### Released packages
+
+- xen-api v0.25.0
+- vhd-lib v0.6.0
+- @xen-orchestra/fs v0.8.0
+- xo-server-usage-report v0.7.2
+- xo-server v5.38.1
+- xo-web v5.38.0
+
+## **5.32.2** (2019-02-28)
+
+### Bug fixes
+
+- Fix XAPI events monitoring on old version (XenServer 7.2)
+
+## **5.32.1** (2019-02-28)
+
+### Bug fixes
+
+- Fix a very short timeout in the monitoring of XAPI events which may lead to unresponsive XenServer hosts
+
 ## **5.32.0** (2019-02-28)
 
 ### Enhancements
 
```
```diff
@@ -2,9 +2,24 @@
 
 ### Enhancements
 
+- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
+- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
+- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
+- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
+- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
+- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
+
 ### Bug fixes
 
+- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
+- Prevent non-admin users to access admin pages with URL (PR [#4220](https://github.com/vatesfr/xen-orchestra/pull/4220))
+- [Upgrade] Fix alert before upgrade while running backup jobs [#4164](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
+- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
+- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
+- [Remotes] Fix disconnected remotes which may appear to work
+- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))
+
 ### Released packages
 
-- xo-server v5.37.0
-- xo-web v5.37.0
+- xo-server v5.42.0
+- xo-web v5.42.0
```
```diff
@@ -14,5 +14,5 @@
 
 1. create a PR as soon as possible
 1. mark it as `WiP:` (Work in Progress) if not ready to be merged
-1. when you want a review, add a reviewer
+1. when you want a review, add a reviewer (and only one)
 1. if necessary, update your PR, and re- add a reviewer
```
```diff
@@ -1,4 +1,4 @@
-# Xen Orchestra [](https://go.crisp.im/chat/embed/?website_id=-JzqzzwddSV7bKGtEyAQ) [](https://travis-ci.org/vatesfr/xen-orchestra)
+# Xen Orchestra [](https://travis-ci.org/vatesfr/xen-orchestra)
 
 
```
BIN docs/assets/cr-seed-1.png (new file, 12 KiB)
BIN docs/assets/cr-seed-2.png (new file, 14 KiB)
BIN docs/assets/cr-seed-3.png (new file, 15 KiB)
BIN docs/assets/cr-seed-4.png (new file, 17 KiB)
BIN docs/assets/metadata-1.png (new file, 9.4 KiB)
BIN docs/assets/metadata-2.png (new file, 71 KiB)
BIN docs/assets/metadata-3.png (new file, 22 KiB)
BIN docs/assets/metadata-4.png (new file, 48 KiB)
BIN docs/assets/metadata-5.png (new file, 55 KiB)
BIN docs/assets/metadata-6.png (new file, 57 KiB)
BIN docs/assets/metadata-7.png (new file, 11 KiB)
```diff
@@ -12,7 +12,9 @@ Another good way to check if there is activity is the XOA VM stats view (on the
 
 ### VDI chain protection
 
-This means your previous VM disks and snapshots should be "merged" (*coalesced* in the XenServer world) before we can take a new snapshot. This mechanism is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. Otherwise, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product dealing with this.
+Backup jobs regularly delete snapshots. When a snapshot is deleted, either manually or via a backup job, it triggers the need for Xenserver to coalesce the VDI chain - to merge the remaining VDIs and base copies in the chain. This means generally we cannot take too many new snapshots on said VM until Xenserver has finished running a coalesce job on the VDI chain.
+
+This mechanism and scheduling is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. If we don't, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product that takes this into account and offers protection.
 
 Without this detection, you could have 2 potential issues:
 
@@ -21,9 +23,9 @@ Without this detection, you could have 2 potential issues:
 
 The first issue is a chain that contains more than 30 elements (fixed XenServer limit), and the other one means it's full because the "coalesce" process couldn't keep up the pace and the storage filled up.
 
-In the end, this message is a **protection mechanism against damaging your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.
+In the end, this message is a **protection mechanism preventing damage to your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.
 
-Just remember this: **coalesce will happen every time a snapshot is removed**.
+Just remember this: **a coalesce should happen every time a snapshot is removed**.
 
 > You can read more on this on our dedicated blog post regarding [XenServer coalesce detection](https://xen-orchestra.com/blog/xenserver-coalesce-detection-in-xen-orchestra/).
 
@@ -37,7 +39,9 @@ Coalesce jobs can also fail to run if the SR does not have enough free space. Ch
 
 You can check if a coalesce job is currently active by running `ps axf | grep vhd` on the XenServer host and looking for a VHD process in the results (one of the resulting processes will be the grep command you just ran, ignore that one).
 
-If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.
+If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.
+
+As a last resort, migrating the VM (more specifically, its disks) to a new storage repository will also force a coalesce and solve this issue. That means migrating a VM to another host (with its own storage) and back will force the VDI chain for that VM to be coalesced, and get rid of the `VDI Chain Protection` message.
 
 ### Parse Error
 
```
````diff
@@ -43,11 +43,19 @@ To protect the replication, we removed the possibility to boot your copied VM di
 
 ### Job creation
 
-Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, note its identifiers, the main `backupJobId` and the ID of one on the schedules for the job, `backupScheduleId`.
+Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, copy the job's `backupJobId` by hovering to the left of the shortened ID and clicking the copy to clipboard button:
+
+![](./assets/cr-seed-1.png)
+
+Copy it somewhere temporarily. Now we need to also copy the ID of the job schedule, `backupScheduleId`. Do this by hovering over the schedule name in the same panel as before, and clicking the copy to clipboard button. Keep it with the `backupJobId` you copied previously as we will need them all later:
+
+![](./assets/cr-seed-2.png)
 
 ### Seed creation
 
-Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUuid` from the snapshot panel for the VM.
+Manually create a snapshot on the VM being backed up, then copy this snapshot UUID, `snapshotUuid` from the snapshot panel of the VM:
+
+![](./assets/cr-seed-3.png)
 
 > DO NOT ever delete or alter this snapshot, feel free to rename it to make that clear.
 
@@ -55,7 +63,9 @@ Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUu
 
 Export this snapshot to a file, then import it on the target SR.
 
-Note the UUID of this newly created VM as `targetVmUuid`.
+We need to copy the UUID of this newly created VM as well, `targetVmUuid`:
+
+![](./assets/cr-seed-4.png)
 
 > DO not start this VM or it will break the Continuous Replication job! You can rename this VM to more easily remember this.
 
@@ -66,7 +76,7 @@ The XOA backup system requires metadata to correctly associate the source snapsh
 First install the tool (all the following is done from the XOA VM CLI):
 
 ```
-npm i -g xo-cr-seed
+sudo npm i -g --unsafe-perm @xen-orchestra/cr-seed-cli
 ```
 
 Here is an example of how the utility expects the UUIDs and info passed to it:
 
````
````diff
@@ -1,13 +1,13 @@
 
 # Installation
 
-SSH to your XenServer and execute the following:
+SSH to your XenServer/XCP-ng host and execute the following:
 
 ```
 bash -c "$(curl -s http://xoa.io/deploy)"
 ```
 
-This will automatically download/import/start the XOA appliance. Nothing is changed on your XenServer host itself, it's 100% safe.
+This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.
 
 ## [More on XOA](xoa.md)
 
````
```diff
@@ -1,6 +1,6 @@
 # Metadata backup
 
-> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
+> WARNING: Metadata backup is an experimental feature. Unexpected issues are possible, but unlikely.
 
 ## Introduction
 
@@ -11,21 +11,38 @@ In Xen Orchestra, Metadata backup is divided into two different options:
 * Pool metadata backup
 * XO configuration backup
 
-### How to use metadata backup
+### Performing a backup
 
-In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.
-![](…)
+In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata:
+![](./assets/metadata-1.png)
 
 When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):
 
-![](…)
+![](./assets/metadata-2.png)
 
 Define the name and retention for the job.
 
-![](…)
+![](./assets/metadata-3.png)
 
 Once created, the job is displayed with the other classic jobs.
 
-![](…)
+![](./assets/metadata-4.png)
 
-> Restore for metadata backup jobs should be available in XO 5.33
+### Performing a restore
+
+> WARNING: restoring pool metadata completely overwrites the XAPI database of a host. Only perform a metadata restore if it is a new server with nothing running on it (eg replacing a host with new hardware).
+
+If you browse to the Backup NG Restore panel, you will now notice a Metadata filter button:
+
+![](./assets/metadata-5.png)
+
+If you click this button, it will show you Metadata backups available for restore:
+
+![](./assets/metadata-6.png)
+
+You can see both our Xen Orchestra config backup, and our pool metadata backup. To restore one, simply click the blue restore arrow, choose a backup date to restore, and click OK:
+
+![](./assets/metadata-7.png)
+
+That's it!
```
```diff
@@ -1,24 +1,33 @@
 # Support
 
-You can access our pro support if you subscribe to any of these plans:
+Xen Orchestra will run in a controlled/tested environment thanks to XOA ([Xen Orchestra virtual Appliance](https://xen-orchestra.com/#!/xoa)). **This is the way to get pro support**. Any account with a registered XOA can access a [dedicated support panel](https://xen-orchestra.com/#!/member/support).
+
+XOA is available in multiple plans:
 
+* Free
 * Starter
 * Enterprise
 * Premium
 
-The better the plan, the faster the support will be with higher priority.
+Higher tier support plans include faster ticket response times (and cover more features). Paid support plans and response times are based on the plan you have, plans can be [reviewed here](https://xen-orchestra.com/#!/xo-pricing).
+
+## XOA Free support
+
+With the free version of the Xen Orchestra Appliance (XOA free), you can open support tickets and we will do our best to assist you, however, this support is limited and is not guaranteed in regards to response times or resolutions offered.
 
 ## Community support
 
-If you are using Xen Orchestra via the sources, you can ask questions and try to recieve help two different ways:
+If you are using Xen Orchestra via the source and not XOA, you can ask questions and try to recieve help through a number of different ways:
 
-* In our [forum](https://xen-orchestra.com/forum/)
+* In our [forum](https://xcp-ng.org/forum/category/12/xen-orchestra)
 * In our IRC - `#xen-orchestra` on `Freenode`
 
-However, there's no guarantee you will receive an answer and no guaranteed response time. If you are using XO from sources, we encourage you to give back to the community by assisting other users via these two avenues as well.
+We encourage you to give back to the community by assisting other users via these two avenues as well.
 
-If you are using Xen Orchestra in production, please subscribe to a plan.
+Lastly while Xen Orchestra is free and Open Source software, supporting and developing it takes a lot of effort. If you are considering using Xen Orchestra in production, please subscribe for one of our [professional support plans](https://xen-orchestra.com/#!/xo-pricing).
+
+> Note: support from the sources is harder, because Xen Orchestra can potentially run on any Linux distro (or even FreeBSD and Windows!). Always try to double check that you followed our guide on how to [install it from the sources](https://xen-orchestra.com/docs/from_the_sources.html) before going further.
 
 ## Open a ticket
 
-If you have a subscription, you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
+If you have a subscription (or at least a registered free XOA), you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
```
```diff
@@ -10,19 +10,20 @@
     "eslint-config-prettier": "^4.1.0",
     "eslint-config-standard": "12.0.0",
     "eslint-config-standard-jsx": "^6.0.2",
+    "eslint-plugin-eslint-comments": "^3.1.1",
     "eslint-plugin-import": "^2.8.0",
-    "eslint-plugin-node": "^8.0.0",
+    "eslint-plugin-node": "^9.0.1",
     "eslint-plugin-promise": "^4.0.0",
     "eslint-plugin-react": "^7.6.1",
     "eslint-plugin-standard": "^4.0.0",
     "exec-promise": "^0.7.0",
-    "flow-bin": "^0.94.0",
+    "flow-bin": "^0.98.0",
     "globby": "^9.0.0",
-    "husky": "^1.2.1",
+    "husky": "^2.2.0",
     "jest": "^24.1.0",
     "lodash": "^4.17.4",
     "prettier": "^1.10.2",
-    "promise-toolbox": "^0.11.0",
+    "promise-toolbox": "^0.12.1",
     "sorted-object": "^2.0.1"
   },
   "engines": {
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "complex-matcher",
-  "version": "0.5.0",
+  "version": "0.6.0",
   "license": "ISC",
   "description": "",
   "keywords": [],
@@ -25,7 +25,7 @@
     ">2%"
   ],
   "engines": {
-    "node": ">=4"
+    "node": ">=6"
   },
   "dependencies": {
     "lodash": "^4.17.4"
@@ -44,6 +44,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
```diff
@@ -599,6 +599,13 @@ export const parse = parser.parse.bind(parser)
 
 // -------------------------------------------------------------------
 
+const _extractStringFromRegexp = child => {
+  const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
+  if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
+    return unescapedRegexp
+  }
+}
+
 const _getPropertyClauseStrings = ({ child }) => {
   if (child instanceof Or) {
     const strings = []
@@ -606,6 +613,12 @@ const _getPropertyClauseStrings = ({ child }) => {
       if (child instanceof StringNode) {
        strings.push(child.value)
      }
+      if (child instanceof RegExpNode) {
+        const unescapedRegexp = _extractStringFromRegexp(child)
+        if (unescapedRegexp !== undefined) {
+          strings.push(unescapedRegexp)
+        }
+      }
    })
    return strings
  }
@@ -613,6 +626,12 @@ const _getPropertyClauseStrings = ({ child }) => {
   if (child instanceof StringNode) {
     return [child.value]
   }
+  if (child instanceof RegExpNode) {
+    const unescapedRegexp = _extractStringFromRegexp(child)
+    if (unescapedRegexp !== undefined) {
+      return [unescapedRegexp]
+    }
+  }
 
   return []
 }
```
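The idea behind `_extractStringFromRegexp` is that a fully anchored, metacharacter-free regexp is equivalent to the plain string it matches, so it can take part in property-clause extraction. A sketch of the round-trip check (assuming `escapeRegExp` is lodash's, which this source appears to use):

```js
import { escapeRegExp } from 'lodash'

const extractStringFromRegexp = re => {
  // Strip the ^/$ anchors and any backslashes, then verify that
  // re-escaping and re-anchoring the result reproduces the source.
  const unescaped = re.source.replace(/^(\^)|\\|\$$/g, '')
  if (re.source === `^${escapeRegExp(unescaped)}$`) {
    return unescaped
  }
}

console.log(extractStringFromRegexp(/^boo$/)) // 'boo': treated as a plain string
console.log(extractStringFromRegexp(/^fo.o$/)) // undefined: '.' is a real wildcard
```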
```diff
@@ -12,10 +12,13 @@ import {
 } from './'
 
 it('getPropertyClausesStrings', () => {
-  const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
+  const tmp = getPropertyClausesStrings(
+    parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
+  )
   expect(tmp).toEqual({
     bar: ['baz'],
-    baz: ['foo', 'bar'],
+    baz: ['foo', 'bar', 'boo', 'far'],
+    foo: ['bar'],
   })
 })
 
```
```diff
@@ -25,7 +25,7 @@
     ">2%"
   ],
   "engines": {
-    "node": ">=4"
+    "node": ">=6"
   },
   "dependencies": {},
   "devDependencies": {
@@ -43,6 +43,7 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepare": "yarn run build",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "vhd-cli",
-  "version": "0.2.0",
+  "version": "0.3.0",
   "license": "ISC",
   "description": "",
   "keywords": [],
@@ -27,12 +27,12 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@xen-orchestra/fs": "^0.7.1",
+    "@xen-orchestra/fs": "^0.9.0",
     "cli-progress": "^2.0.0",
     "exec-promise": "^0.7.0",
     "getopts": "^2.2.3",
     "struct-fu": "^1.2.0",
-    "vhd-lib": "^0.5.1"
+    "vhd-lib": "^0.7.0"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -42,15 +42,16 @@
     "cross-env": "^5.1.3",
     "execa": "^1.0.0",
     "index-modules": "^0.3.0",
-    "promise-toolbox": "^0.11.0",
+    "promise-toolbox": "^0.12.1",
     "rimraf": "^2.6.1",
-    "tmp": "^0.0.33"
+    "tmp": "^0.1.0"
   },
   "scripts": {
     "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
     "predev": "yarn run prebuild",
-    "prepare": "yarn run build"
+    "prepare": "yarn run build",
+    "postversion": "npm publish"
   }
 }
```
packages/vhd-cli/src/commands/repl.js (new file, 33 lines)

```diff
@@ -0,0 +1,33 @@
+import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
+import { getHandler } from '@xen-orchestra/fs'
+import { relative } from 'path'
+import { start as createRepl } from 'repl'
+import Vhd, * as vhdLib from 'vhd-lib'
+
+export default async args => {
+  const cwd = process.cwd()
+  const handler = getHandler({ url: 'file://' + cwd })
+  await handler.sync()
+  try {
+    const repl = createRepl({
+      prompt: 'vhd> ',
+    })
+    Object.assign(repl.context, vhdLib)
+    repl.context.handler = handler
+    repl.context.open = path => new Vhd(handler, relative(cwd, path))
+
+    // Make the REPL waits for promise completion.
+    repl.eval = (evaluate => (cmd, context, filename, cb) => {
+      asCallback.call(
+        fromCallback(cb => {
+          evaluate.call(repl, cmd, context, filename, cb)
+        }).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
+        cb
+      )
+    })(repl.eval)
+
+    await fromEvent(repl, 'exit')
+  } finally {
+    await handler.forget()
+  }
+}
```
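The notable trick in this new command is decorating `repl.eval` so that a promise typed at the prompt is resolved before the REPL prints it. A generic Node sketch of the same pattern, independent of vhd-lib:

```js
import { start } from 'repl'

const repl = start({ prompt: '> ' })

// Wrap the REPL's eval: resolve any promise produced by the evaluated
// code before handing the value back for printing.
repl.eval = (evaluate => (cmd, context, filename, cb) => {
  evaluate.call(repl, cmd, context, filename, (error, result) => {
    if (error != null) {
      return cb(error)
    }
    Promise.resolve(result).then(value => cb(null, value), cb)
  })
})(repl.eval)
```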
@@ -1,38 +1,40 @@
/* eslint-env jest */

import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { randomBytes } from 'crypto'

import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'

import { SECTOR_SIZE } from './src/_constants'

const initialDir = process.cwd()
let tempDir = null

jest.setTimeout(60000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
  await pFromCallback(cb => rimraf(tempDir, cb))
})

async function createRandomFile(name, sizeMb) {
  await execa('bash', [
    '-c',
    `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
  ])
async function createRandomFile(name, sizeMB) {
  const createRandomStream = asyncIteratorToStream(function*(size) {
    while (size-- > 0) {
      yield Buffer.from([Math.floor(Math.random() * 256)])
    }
  })
  const input = createRandomStream(sizeMB * 1024 * 1024)
  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}

async function checkFile(vhdName) {
@@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) {

test('blocks can be moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  const rawFileName = `${tempDir}/randomfile`
  await createRandomFile(rawFileName, initalSize)
  const vhdFileName = `${tempDir}/randomfile.vhd`
  await convertFromRawToVhd(rawFileName, vhdFileName)
  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const newVhd = new Vhd(handler, vhdFileName)
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd._freeFirstBlockSpace(8000000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  const recoveredFileName = `${tempDir}/recovered`
  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(
    await fs.readFile(rawFileName)
  )
})

test('the BAT MSB is not used for sign', async () => {
  const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const vhd = new Vhd(handler, 'empty.vhd')
  const emptyFileName = `${tempDir}/empty.vhd`
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
  const handler = getHandler({ url: 'file://' })
  const vhd = new Vhd(handler, emptyFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  // we want the bit 31 to be on, to prove it's not been used for sign
  const hugeWritePositionSectors = Math.pow(2, 31) + 200
  await vhd.writeData(hugeWritePositionSectors, randomBuffer)
  await checkFile('empty.vhd')
  await checkFile(emptyFileName)
  // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
  const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
  await vhd._freeFirstBlockSpace(hugePositionBytes)
@@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => {
  // we recover the data manually for speed reasons.
  // fs.write() with offset is way faster than qemu-img when there is a 1.5To
  // hole before the block of data
  const recoveredFile = await fs.open('recovered', 'w')
  const recoveredFileName = `${tempDir}/recovered`
  const recoveredFile = await fs.open(recoveredFileName, 'w')
  try {
    const vhd2 = new Vhd(handler, 'empty.vhd')
    const vhd2 = new Vhd(handler, emptyFileName)
    await vhd2.readHeaderAndFooter()
    await vhd2.readBlockAllocationTable()
    for (let i = 0; i < vhd.header.maxTableEntries; i++) {
@@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => {
    fs.close(recoveredFile)
  }
  const recovered = await getStream.buffer(
    await fs.createReadStream('recovered', {
    await fs.createReadStream(recoveredFileName, {
      start: hugePositionBytes,
      end: hugePositionBytes + randomBuffer.length - 1,
    })
@@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => {

test('writeData on empty file', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  await createRandomFile(rawFileName, mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
  const randomData = await fs.readFile(rawFileName)
  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const newVhd = new Vhd(handler, emptyFileName)
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(0, randomData)
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
  const recoveredFileName = `${tempDir}/recovered`
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})

test('writeData in 2 non-overlaping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(rawFileName, mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
  const randomData = await fs.readFile(rawFileName)
  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const newVhd = new Vhd(handler, emptyFileName)
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const splitPointSectors = 2
@@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => {
    splitPointSectors,
    randomData.slice(splitPointSectors * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})

test('writeData in 2 overlaping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(rawFileName, mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
  const randomData = await fs.readFile(rawFileName)
  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const newVhd = new Vhd(handler, emptyFileName)
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const endFirstWrite = 3
@@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => {
    startSecondWrite,
    randomData.slice(startSecondWrite * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})

test('BAT can be extended and blocks moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  const rawFileName = `${tempDir}/randomfile`
  const recoveredFileName = `${tempDir}/recovered`
  const vhdFileName = `${tempDir}/randomfile.vhd`
  await createRandomFile(rawFileName, initalSize)
  await convertFromRawToVhd(rawFileName, vhdFileName)
  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const newVhd = new Vhd(handler, vhdFileName)
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.ensureBatSize(2000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(
    await fs.readFile(rawFileName)
  )
})

test('coalesce works with empty parent files', async () => {
  const mbOfRandom = 2
  await createRandomFile('randomfile', mbOfRandom)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  const vhdFileName = `${tempDir}/randomfile.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(rawFileName, mbOfRandom)
  await convertFromRawToVhd(rawFileName, vhdFileName)
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'empty.vhd',
    emptyFileName,
    mbOfRandom + 1 + 'M',
  ])
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  await checkFile(vhdFileName)
  await checkFile(emptyFileName)
  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler._getSize(rawFileName)
  await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
  await checkFile(vhdFileName)
  await checkFile(emptyFileName)
  await vhdMerge(handler, emptyFileName, handler, vhdFileName)
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(
    await fs.readFile(rawFileName)
  )
})

test('coalesce works in normal cases', async () => {
  const mbOfRandom = 5
  await createRandomFile('randomfile', mbOfRandom)
  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
  const randomFileName = `${tempDir}/randomfile`
  const random2FileName = `${tempDir}/randomfile2`
  const smallRandomFileName = `${tempDir}/small_randomfile`
  const parentFileName = `${tempDir}/parent.vhd`
  const child1FileName = `${tempDir}/child1.vhd`
  const child2FileName = `${tempDir}/child2.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(randomFileName, mbOfRandom)
  await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'parent.vhd',
    parentFileName,
    mbOfRandom + 1 + 'M',
  ])
  await convertFromRawToVhd('randomfile', 'child1.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
  const vhd = new Vhd(handler, 'child2.vhd')
  await convertFromRawToVhd(randomFileName, child1FileName)
  const handler = getHandler({ url: 'file://' })
  await execa('vhd-util', [
    'snapshot',
    '-n',
    child2FileName,
    '-p',
    child1FileName,
  ])
  const vhd = new Vhd(handler, child2FileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  vhd.footer.creatorApplication = 'xoa'
  await vhd.writeFooter()

  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
  const smallRandom = await fs.readFile('small_randomfile')
  const newVhd = new Vhd(handler, 'child2.vhd')
  const originalSize = await handler._getSize(randomFileName)
  await chainVhd(handler, parentFileName, handler, child1FileName, true)
  await execa('vhd-util', ['check', '-t', '-n', child1FileName])
  await chainVhd(handler, child1FileName, handler, child2FileName, true)
  await execa('vhd-util', ['check', '-t', '-n', child2FileName])
  const smallRandom = await fs.readFile(smallRandomFileName)
  const newVhd = new Vhd(handler, child2FileName)
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(5, smallRandom)
  await checkFile('child2.vhd')
  await checkFile('child1.vhd')
  await checkFile('parent.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
  await checkFile('parent.vhd')
  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
  await checkFile('child2.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
  await checkFile('parent.vhd')
  await recoverRawContent(
    'parent.vhd',
    'recovered_from_coalescing',
    originalSize
  )
  await execa('cp', ['randomfile', 'randomfile2'])
  const fd = await fs.open('randomfile2', 'r+')
  await checkFile(child2FileName)
  await checkFile(child1FileName)
  await checkFile(parentFileName)
  await vhdMerge(handler, parentFileName, handler, child1FileName)
  await checkFile(parentFileName)
  await chainVhd(handler, parentFileName, handler, child2FileName, true)
  await checkFile(child2FileName)
  await vhdMerge(handler, parentFileName, handler, child2FileName)
  await checkFile(parentFileName)
  await recoverRawContent(parentFileName, recoveredFileName, originalSize)
  await execa('cp', [randomFileName, random2FileName])
  const fd = await fs.open(random2FileName, 'r+')
  try {
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
  } finally {
    await fs.close(fd)
  }
  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
    await fs.readFile('randomfile2')
  expect(await fs.readFile(recoveredFileName)).toEqual(
    await fs.readFile(random2FileName)
  )
})

test('createSyntheticStream passes vhd-util check', async () => {
test.only('createSyntheticStream passes vhd-util check', async () => {
  const initalSize = 4
  const expectedVhdSize = 4197888
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = await createSyntheticStream(handler, 'randomfile.vhd')
  expect(stream.length).toEqual(expectedVhdSize)
  await fromEvent(
    stream.pipe(await fs.createWriteStream('recovered.vhd')),
    'finish'
  const rawFileName = `${tempDir}/randomfile`
  const vhdFileName = `${tempDir}/randomfile.vhd`
  const recoveredVhdFileName = `${tempDir}/recovered.vhd`
  await createRandomFile(rawFileName, initalSize)
  await convertFromRawToVhd(rawFileName, vhdFileName)
  await checkFile(vhdFileName)
  const handler = getHandler({ url: 'file://' })
  const stream = await createSyntheticStream(handler, vhdFileName)
  const expectedVhdSize = (await fs.stat(vhdFileName)).size
  expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
  await pFromCallback(cb =>
    pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb)
  )
  await checkFile('recovered.vhd')
  const stats = await fs.stat('recovered.vhd')
  await checkFile(recoveredVhdFileName)
  const stats = await fs.stat(recoveredVhdFileName)
  expect(stats.size).toEqual(expectedVhdSize)
  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
  await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
})
@@ -1,6 +1,6 @@
{
  "name": "vhd-lib",
  "version": "0.5.1",
  "version": "0.7.0",
  "license": "AGPL-3.0",
  "description": "Primitives for VHD file handling",
  "keywords": [],
@@ -22,11 +22,11 @@
  },
  "dependencies": {
    "async-iterator-to-stream": "^1.0.2",
    "core-js": "3.0.0-beta.3",
    "core-js": "^3.0.0",
    "from2": "^2.3.0",
    "fs-extra": "^7.0.0",
    "fs-extra": "^8.0.1",
    "limit-concurrency-decorator": "^0.4.0",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "struct-fu": "^1.2.0",
    "uuid": "^3.0.1"
  },
@@ -35,15 +35,16 @@
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "@babel/preset-flow": "^7.0.0",
    "@xen-orchestra/fs": "^0.7.1",
    "@xen-orchestra/fs": "^0.9.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "execa": "^1.0.0",
    "fs-promise": "^2.0.0",
    "get-stream": "^4.0.0",
    "index-modules": "^0.3.0",
    "readable-stream": "^3.0.6",
    "rimraf": "^2.6.2",
    "tmp": "^0.0.33"
    "tmp": "^0.1.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -51,6 +52,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
    "prepare": "yarn run build",
    "postversion": "npm publish"
  }
}
20  packages/vhd-lib/src/_checkFooter.js  Normal file
@@ -0,0 +1,20 @@
import assert from 'assert'

import {
  DISK_TYPE_DIFFERENCING,
  DISK_TYPE_DYNAMIC,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
} from './_constants'

export default footer => {
  assert.strictEqual(footer.cookie, FOOTER_COOKIE)
  assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
  assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
  assert(footer.originalSize <= footer.currentSize)
  assert(
    footer.diskType === DISK_TYPE_DIFFERENCING ||
      footer.diskType === DISK_TYPE_DYNAMIC
  )
}
14  packages/vhd-lib/src/_checkHeader.js  Normal file
@@ -0,0 +1,14 @@
import assert from 'assert'

import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'

export default (header, footer) => {
  assert.strictEqual(header.cookie, HEADER_COOKIE)
  assert.strictEqual(header.dataOffset, undefined)
  assert.strictEqual(header.headerVersion, HEADER_VERSION)
  assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))

  if (footer !== undefined) {
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
  }
}
45  packages/vhd-lib/src/_getFirstAndLastBlocks.js  Normal file
@@ -0,0 +1,45 @@
import assert from 'assert'

import { BLOCK_UNUSED } from './_constants'

// get the identifiers and first sectors of the first and last block
// in the file
export default bat => {
  const n = bat.length
  assert.notStrictEqual(n, 0)
  assert.strictEqual(n % 4, 0)

  let i = 0
  let j = 0
  let first, firstSector, last, lastSector

  // get first allocated block for initialization
  while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
    i += 1
    j += 4

    if (j === n) {
      return
    }
  }
  lastSector = firstSector
  first = last = i

  while (j < n) {
    const sector = bat.readUInt32BE(j)
    if (sector !== BLOCK_UNUSED) {
      if (sector < firstSector) {
        first = i
        firstSector = sector
      } else if (sector > lastSector) {
        last = i
        lastSector = sector
      }
    }

    i += 1
    j += 4
  }

  return { first, firstSector, last, lastSector }
}
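A quick illustration of this helper's contract (the import path is illustrative, the module is internal): the BAT is a buffer of big-endian uint32 sector addresses, BLOCK_UNUSED entries are skipped, and undefined is returned when no block is allocated at all.

import getFirstAndLastBlocks from './_getFirstAndLastBlocks'

const BLOCK_UNUSED = 0xffffffff
// a 3-entry BAT: block 0 unused, block 1 at sector 5, block 2 at sector 3
const bat = Buffer.alloc(3 * 4)
bat.writeUInt32BE(BLOCK_UNUSED, 0)
bat.writeUInt32BE(5, 4)
bat.writeUInt32BE(3, 8)

console.log(getFirstAndLastBlocks(bat))
// { first: 2, firstSector: 3, last: 1, lastSector: 5 }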
50  packages/vhd-lib/src/_readChunk.js  Normal file
@@ -0,0 +1,50 @@
export default async function readChunk(stream, n) {
  if (n === 0) {
    return Buffer.alloc(0)
  }
  return new Promise((resolve, reject) => {
    const chunks = []
    let i = 0

    function clean() {
      stream.removeListener('readable', onReadable)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }

    function resolve2() {
      clean()
      resolve(Buffer.concat(chunks, i))
    }

    function onEnd() {
      resolve2()
      clean()
    }

    function onError(error) {
      reject(error)
      clean()
    }

    function onReadable() {
      const chunk = stream.read(n - i)
      if (chunk === null) {
        return // wait for more data
      }
      i += chunk.length
      chunks.push(chunk)
      if (i >= n) {
        resolve2()
      }
    }

    stream.on('end', onEnd)
    stream.on('error', onError)
    stream.on('readable', onReadable)

    if (stream.readable) {
      onReadable()
    }
  })
}
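A hedged usage sketch: reading the 512-byte footer from the head of a VHD file stream (path and size are illustrative). Note that the helper resolves with whatever bytes are available if the stream ends early, which is why its caller in createVhdStreamWithLength asserts on the returned length.

import { createReadStream } from 'fs'
import readChunk from './_readChunk'

async function readFooter(path) {
  const stream = createReadStream(path)
  const footer = await readChunk(stream, 512) // may be shorter on a truncated file
  stream.destroy()
  return footer
}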
132  packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js  Normal file
@@ -0,0 +1,132 @@
/* eslint-env jest */

import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import getStream from 'get-stream'
import tmp from 'tmp'
import { createReadStream, createWriteStream } from 'fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

import { createVhdStreamWithLength } from '.'
import { FOOTER_SIZE } from './_constants'

let tempDir = null

beforeEach(async () => {
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})

afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
})

const RAW = 'raw'
const VHD = 'vpc'
const convert = (inputFormat, inputFile, outputFormat, outputFile) =>
  execa('qemu-img', [
    'convert',
    '-f',
    inputFormat,
    '-O',
    outputFormat,
    inputFile,
    outputFile,
  ])

const createRandomStream = asyncIteratorToStream(function*(size) {
  let requested = Math.min(size, yield)
  while (size > 0) {
    const buf = Buffer.allocUnsafe(requested)
    for (let i = 0; i < requested; ++i) {
      buf[i] = Math.floor(Math.random() * 256)
    }
    requested = Math.min((size -= requested), yield buf)
  }
})

async function createRandomFile(name, size) {
  const input = await createRandomStream(size)
  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}

const forOwn = (object, cb) =>
  Object.keys(object).forEach(key => cb(object[key], key, object))

describe('createVhdStreamWithLength', () => {
  forOwn(
    {
      // qemu-img requires this length or it fills with null bytes, which
      // breaks the test
      'can extract length': 34816,

      'can handle empty file': 0,
    },
    (size, title) =>
      it(title, async () => {
        const inputRaw = `${tempDir}/input.raw`
        await createRandomFile(inputRaw, size)

        const inputVhd = `${tempDir}/input.vhd`
        await convert(RAW, inputRaw, VHD, inputVhd)

        const result = await createVhdStreamWithLength(
          await createReadStream(inputVhd)
        )
        const { length } = result

        const outputVhd = `${tempDir}/output.vhd`
        await pFromCallback(
          pipeline.bind(undefined, result, await createWriteStream(outputVhd))
        )

        // ensure the guessed length corresponds to the stream length
        const { size: outputSize } = await fs.stat(outputVhd)
        expect(length).toEqual(outputSize)

        // ensure the generated VHD is correct and contains the same data
        const outputRaw = `${tempDir}/output.raw`
        await convert(VHD, outputVhd, RAW, outputRaw)
        await execa('cmp', [inputRaw, outputRaw])
      })
  )

  it('can skip blank after the last block and before the footer', async () => {
    const initialSize = 4 * 1024
    const rawFileName = `${tempDir}/randomfile`
    const vhdName = `${tempDir}/randomfile.vhd`
    const outputVhdName = `${tempDir}/output.vhd`
    await createRandomFile(rawFileName, initialSize)
    await convert(RAW, rawFileName, VHD, vhdName)
    const { size: vhdSize } = await fs.stat(vhdName)
    // read file footer
    const footer = await getStream.buffer(
      createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
    )

    // we'll override the footer
    const endOfFile = await createWriteStream(vhdName, {
      flags: 'r+',
      start: vhdSize - FOOTER_SIZE,
    })
    // write a blank over the previous footer
    await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
    // write the footer after the new blank
    await pFromCallback(cb => endOfFile.end(footer, cb))
    const { size: longerSize } = await fs.stat(vhdName)
    // check input file has been lengthened
    expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
    const result = await createVhdStreamWithLength(
      await createReadStream(vhdName)
    )
    expect(result.length).toEqual(vhdSize)
    const outputFileStream = await createWriteStream(outputVhdName)
    await pFromCallback(cb => pipeline(result, outputFileStream, cb))
    const { size: outputSize } = await fs.stat(outputVhdName)
    // check output file has been shortened again
    expect(outputSize).toEqual(vhdSize)
    await execa('qemu-img', ['compare', outputVhdName, vhdName])
  })
})
84  packages/vhd-lib/src/createVhdStreamWithLength.js  Normal file
@@ -0,0 +1,84 @@
import assert from 'assert'
import { pipeline, Transform } from 'readable-stream'

import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import noop from './_noop'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import readChunk from './_readChunk'
import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader } from './_structs'

class EndCutterStream extends Transform {
  constructor(footerOffset, footerBuffer) {
    super()
    this._footerOffset = footerOffset
    this._footerBuffer = footerBuffer
    this._position = 0
    this._done = false
  }

  _transform(data, encoding, callback) {
    if (!this._done) {
      if (this._position + data.length >= this._footerOffset) {
        this._done = true
        const difference = this._footerOffset - this._position
        data = data.slice(0, difference)
        this.push(data)
        this.push(this._footerBuffer)
      } else {
        this.push(data)
      }
      this._position += data.length
    }
    callback()
  }
}

export default async function createVhdStreamWithLength(stream) {
  const readBuffers = []
  let streamPosition = 0

  async function readStream(length) {
    const chunk = await readChunk(stream, length)
    assert.strictEqual(chunk.length, length)
    streamPosition += chunk.length
    readBuffers.push(chunk)
    return chunk
  }

  const footerBuffer = await readStream(FOOTER_SIZE)
  const footer = fuFooter.unpack(footerBuffer)
  checkFooter(footer)

  const header = fuHeader.unpack(await readStream(HEADER_SIZE))
  checkHeader(header, footer)

  await readStream(header.tableOffset - streamPosition)

  const table = await readStream(header.maxTableEntries * 4)

  readBuffers.reverse()
  for (const buf of readBuffers) {
    stream.unshift(buf)
  }

  const firstAndLastBlocks = getFirstAndLastBlocks(table)
  const footerOffset =
    firstAndLastBlocks !== undefined
      ? firstAndLastBlocks.lastSector * SECTOR_SIZE +
        Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) *
          SECTOR_SIZE +
        header.blockSize
      : Math.ceil(streamPosition / SECTOR_SIZE) * SECTOR_SIZE

  // ignore any data after footerOffset and push footerBuffer
  //
  // this is necessary to ignore any blank space between the last block and the
  // final footer which would invalidate the size we computed
  const newStream = new EndCutterStream(footerOffset, footerBuffer)
  pipeline(stream, newStream, noop)

  newStream.length = footerOffset + FOOTER_SIZE
  return newStream
}
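The point of the length computation: a dynamic VHD read from an arbitrary stream has no known size, yet consumers want to announce one up front (for instance as an HTTP Content-Length). A hypothetical usage sketch (the file name is illustrative):

import { createReadStream } from 'fs'
import { createVhdStreamWithLength } from 'vhd-lib'

async function main() {
  const stream = await createVhdStreamWithLength(createReadStream('disk.vhd'))
  // stream.length is known before any block is consumed, so it can be
  // announced ahead of time to whatever receives the data
  console.log('will stream exactly %d bytes', stream.length)
  stream.pipe(process.stdout)
}
main().catch(console.error)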
@@ -11,3 +11,6 @@ export {
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export {
  default as createVhdStreamWithLength,
} from './createVhdStreamWithLength'
@@ -1,19 +1,14 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'

import constantStream from './_constant-stream'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
  BLOCK_UNUSED,
  DISK_TYPE_DIFFERENCING,
  DISK_TYPE_DYNAMIC,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
  HEADER_COOKIE,
  HEADER_SIZE,
  HEADER_VERSION,
  PARENT_LOCATOR_ENTRIES,
  PLATFORM_NONE,
  PLATFORM_W2KU,
@@ -170,21 +165,10 @@ export default class Vhd {
    }

    const footer = (this.footer = fuFooter.unpack(bufFooter))
    assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
    assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
    assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
    assert(footer.originalSize <= footer.currentSize)
    assert(
      footer.diskType === DISK_TYPE_DIFFERENCING ||
        footer.diskType === DISK_TYPE_DYNAMIC
    )
    checkFooter(footer)

    const header = (this.header = fuHeader.unpack(bufHeader))
    assert.strictEqual(header.cookie, HEADER_COOKIE)
    assert.strictEqual(header.dataOffset, undefined)
    assert.strictEqual(header.headerVersion, HEADER_VERSION)
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
    assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
    checkHeader(header, footer)

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.
@@ -242,109 +226,49 @@ export default class Vhd {
    )
  }

  // get the identifiers and first sectors of the first and last block
  // in the file
  //
  _getFirstAndLastBlocks() {
    const n = this.header.maxTableEntries
    const bat = this.blockTable
    let i = 0
    let j = 0
    let first, firstSector, last, lastSector

    // get first allocated block for initialization
    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
      i += 1
      j += 4

      if (i === n) {
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
      }
    }
    lastSector = firstSector
    first = last = i

    while (i < n) {
      const sector = bat.readUInt32BE(j)
      if (sector !== BLOCK_UNUSED) {
        if (sector < firstSector) {
          first = i
          firstSector = sector
        } else if (sector > lastSector) {
          last = i
          lastSector = sector
        }
      }

      i += 1
      j += 4
    }

    return { first, firstSector, last, lastSector }
  }

  // =================================================================
  // Write functions.
  // =================================================================

  // Write a buffer/stream at a given position in a vhd file.
  // Write a buffer at a given position in a vhd file.
  async _write(data, offset) {
    debug(
      `_write offset=${offset} size=${
        Buffer.isBuffer(data) ? data.length : '???'
      }`
    )
    // TODO: could probably be merged in remote handlers.
    const stream = await this._handler.createOutputStream(this._path, {
      flags: 'r+',
      start: offset,
    })
    return Buffer.isBuffer(data)
      ? new Promise((resolve, reject) => {
          stream.on('error', reject)
          stream.end(data, resolve)
        })
      : fromEvent(data.pipe(stream), 'finish')
    assert(Buffer.isBuffer(data))
    debug(`_write offset=${offset} size=${data.length}`)
    return this._handler.write(this._path, data, offset)
  }

  async _freeFirstBlockSpace(spaceNeededBytes) {
    try {
      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
      const tableOffset = this.header.tableOffset
      const { batSize } = this
      const newMinSector = Math.ceil(
        (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
    const firstAndLastBlocks = getFirstAndLastBlocks(this.blockTable)
    if (firstAndLastBlocks === undefined) {
      return
    }

    const { first, firstSector, lastSector } = firstAndLastBlocks
    const tableOffset = this.header.tableOffset
    const { batSize } = this
    const newMinSector = Math.ceil(
      (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
    )
    if (
      tableOffset + batSize + spaceNeededBytes >=
      sectorsToBytes(firstSector)
    ) {
      const { fullBlockSize } = this
      const newFirstSector = Math.max(
        lastSector + fullBlockSize / SECTOR_SIZE,
        newMinSector
      )
      if (
        tableOffset + batSize + spaceNeededBytes >=
        sectorsToBytes(firstSector)
      ) {
        const { fullBlockSize } = this
        const newFirstSector = Math.max(
          lastSector + fullBlockSize / SECTOR_SIZE,
          newMinSector
        )
        debug(
          `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
        )
        // copy the first block at the end
        const block = await this._read(
          sectorsToBytes(firstSector),
          fullBlockSize
        )
        await this._write(block, sectorsToBytes(newFirstSector))
        await this._setBatEntry(first, newFirstSector)
        await this.writeFooter(true)
        spaceNeededBytes -= this.fullBlockSize
        if (spaceNeededBytes > 0) {
          return this._freeFirstBlockSpace(spaceNeededBytes)
        }
      }
    } catch (e) {
      if (!e.noBlock) {
        throw e
      debug(
        `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
      )
      // copy the first block at the end
      const block = await this._read(sectorsToBytes(firstSector), fullBlockSize)
      await this._write(block, sectorsToBytes(newFirstSector))
      await this._setBatEntry(first, newFirstSector)
      await this.writeFooter(true)
      spaceNeededBytes -= this.fullBlockSize
      if (spaceNeededBytes > 0) {
        return this._freeFirstBlockSpace(spaceNeededBytes)
      }
    }
  }
@@ -367,7 +291,7 @@ export default class Vhd {
      `ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
    )
    await this._write(
      constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
      Buffer.alloc(maxTableEntries - prevMaxTableEntries, BUF_BLOCK_UNUSED),
      header.tableOffset + prevBat.length
    )
    await this.writeHeader()
@@ -392,10 +316,7 @@ export default class Vhd {

    await Promise.all([
      // Write an empty block and addr in vhd file.
      this._write(
        constantStream([0], this.fullBlockSize),
        sectorsToBytes(blockAddr)
      ),
      this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),

      this._setBatEntry(blockId, blockAddr),
    ])
@@ -4,22 +4,20 @@ import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

import { createReadableRawStream, createReadableSparseStream } from './'

import { createFooter } from './src/_createFooterHeader'

const initialDir = process.cwd()
let tempDir = null

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
  await pFromCallback(cb => rimraf(tempDir, cb))
})

test('createFooter() does not crash', () => {
@@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => {
  }
  const fileSize = 1000
  const stream = createReadableRawStream(fileSize, mockParser)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await pFromCallback(cb =>
    pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb)
  )
  await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
})

test('ReadableRawVHDStream detects when blocks are out of order', async () => {
@@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => {
    new Promise((resolve, reject) => {
      const stream = createReadableRawStream(100000, mockParser)
      stream.on('error', reject)
      const pipe = stream.pipe(createWriteStream('outputStream'))
      pipe.on('finish', resolve)
      pipe.on('error', reject)
      pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err =>
        err ? reject(err) : resolve()
      )
    })
  ).rejects.toThrow('Received out of order blocks')
})
@@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
    blocks
  )
  expect(stream.length).toEqual(4197888)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-O',
    'raw',
    'output.vhd',
    'out1.raw',
    `${tempDir}/output.vhd`,
    `${tempDir}/out1.raw`,
  ])
  const out1 = await readFile('out1.raw')
  const out1 = await readFile(`${tempDir}/out1.raw`)
  const expected = Buffer.alloc(fileSize)
  blocks.forEach(b => {
    b.data.copy(expected, b.offsetBytes)
@@ -41,7 +41,7 @@
    "human-format": "^0.10.0",
    "lodash": "^4.17.4",
    "pw": "^0.0.4",
    "xen-api": "^0.24.5"
    "xen-api": "^0.25.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.1.5",
@@ -56,6 +56,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
@@ -4,7 +4,7 @@ const { PassThrough, pipeline } = require('readable-stream')
const humanFormat = require('human-format')
const Throttle = require('throttle')

const { isOpaqueRef } = require('../')
const isOpaqueRef = require('../dist/_isOpaqueRef').default

exports.createInputStream = path => {
  if (path === undefined || path === '-') {
@@ -1,6 +1,6 @@
{
  "name": "xen-api",
  "version": "0.24.5",
  "version": "0.25.1",
  "license": "ISC",
  "description": "Connector to the Xen API",
  "keywords": [
@@ -33,12 +33,12 @@
    "node": ">=6"
  },
  "dependencies": {
    "bind-property-descriptor": "^1.0.0",
    "blocked": "^1.2.1",
    "debug": "^4.0.1",
    "event-to-promise": "^0.8.0",
    "exec-promise": "^0.7.0",
    "http-request-plus": "^0.7.2",
    "iterable-backoff": "^0.1.0",
    "http-request-plus": "^0.8.0",
    "jest-diff": "^24.0.0",
    "json-rpc-protocol": "^0.13.1",
    "kindof": "^2.0.0",
@@ -46,7 +46,7 @@
    "make-error": "^1.3.0",
    "minimist": "^1.2.0",
    "ms": "^2.1.1",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "pw": "0.0.4",
    "xmlrpc": "^1.3.2",
    "xo-collection": "^0.4.1"
@@ -54,7 +54,10 @@
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/plugin-proposal-class-properties": "^7.3.4",
    "@babel/plugin-proposal-decorators": "^7.0.0",
    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.2.0",
    "@babel/plugin-proposal-optional-chaining": "^7.2.0",
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
@@ -66,6 +69,7 @@
    "plot": "gnuplot -p memory-test.gnu",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
30  packages/xen-api/src/_XapiError.js  Normal file
@@ -0,0 +1,30 @@
import { BaseError } from 'make-error'

export default class XapiError extends BaseError {
  static wrap(error) {
    let code, params
    if (Array.isArray(error)) {
      // < XenServer 7.3
      ;[code, ...params] = error
    } else {
      code = error.message
      params = error.data
      if (!Array.isArray(params)) {
        params = []
      }
    }
    return new XapiError(code, params)
  }

  constructor(code, params) {
    super(`${code}(${params.join(', ')})`)

    this.code = code
    this.params = params

    // slots that can be assigned later
    this.call = undefined
    this.url = undefined
    this.task = undefined
  }
}
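A sketch of the wrapper's two input shapes (values are illustrative; the import path points at an internal module):

import XapiError from './_XapiError'

// pre-7.3 XenServers return errors as [code, ...params]
const e1 = XapiError.wrap(['SR_BACKEND_FAILURE_44', 'insufficient space'])
e1.code // 'SR_BACKEND_FAILURE_44'
e1.message // 'SR_BACKEND_FAILURE_44(insufficient space)'

// otherwise an error-like object is expected; non-array data falls back to []
const e2 = XapiError.wrap({ message: 'HOST_OFFLINE', data: null })
e2.message // 'HOST_OFFLINE()'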
15  packages/xen-api/src/_coalesceCalls.js  Normal file
@@ -0,0 +1,15 @@
// decorates fn so that concurrent calls are coalesced into a single one
export default function coalesceCalls(fn) {
  let promise
  const clean = () => {
    promise = undefined
  }
  return function() {
    if (promise !== undefined) {
      return promise
    }
    promise = fn.apply(this, arguments)
    promise.then(clean, clean)
    return promise
  }
}
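A usage sketch: with the decorator, any number of overlapping calls share a single in-flight promise, and the slot is cleared once it settles so later calls start fresh (loadConfigFromServer is a hypothetical stand-in for the real work):

import coalesceCalls from './_coalesceCalls'

const fetchConfig = coalesceCalls(() => loadConfigFromServer())

async function demo() {
  // the three concurrent calls below trigger a single load
  const results = await Promise.all([fetchConfig(), fetchConfig(), fetchConfig()])
  // once the shared promise has settled, a new call starts a fresh load
  await fetchConfig()
  return results
}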
26  packages/xen-api/src/_coalesceCalls.spec.js  Normal file
@@ -0,0 +1,26 @@
/* eslint-env jest */

import pDefer from 'promise-toolbox/defer'

import coalesceCalls from './_coalesceCalls'

describe('coalesceCalls', () => {
  it('decorates an async function', async () => {
    const fn = coalesceCalls(promise => promise)

    const defer1 = pDefer()
    const promise1 = fn(defer1.promise)
    const defer2 = pDefer()
    const promise2 = fn(defer2.promise)

    defer1.resolve('foo')
    expect(await promise1).toBe('foo')
    expect(await promise2).toBe('foo')

    const defer3 = pDefer()
    const promise3 = fn(defer3.promise)

    defer3.resolve('bar')
    expect(await promise3).toBe('bar')
  })
})
3  packages/xen-api/src/_debug.js  Normal file
@@ -0,0 +1,3 @@
import debug from 'debug'

export default debug('xen-api')
22  packages/xen-api/src/_getTaskResult.js  Normal file
@@ -0,0 +1,22 @@
import { Cancel } from 'promise-toolbox'

import XapiError from './_XapiError'

export default task => {
  const { status } = task
  if (status === 'cancelled') {
    return Promise.reject(new Cancel('task canceled'))
  }
  if (status === 'failure') {
    const error = XapiError.wrap(task.error_info)
    error.task = task
    return Promise.reject(error)
  }
  if (status === 'success') {
    // the result might be:
    // - empty string
    // - an opaque reference
    // - an XML-RPC value
    return Promise.resolve(task.result)
  }
}
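A hypothetical sketch of how such a helper can be consumed when watching task records: undefined means the task is still pending, a promise means it has settled one way or another.

import getTaskResult from './_getTaskResult'

function onTaskUpdate(task) {
  const result = getTaskResult(task) // undefined while status is 'pending'
  if (result !== undefined) {
    result.then(
      value => console.log('task succeeded:', value),
      error => console.error('task failed:', error)
    )
  }
}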
3  packages/xen-api/src/_isGetAllRecordsMethod.js  Normal file
@@ -0,0 +1,3 @@
const SUFFIX = '.get_all_records'

export default method => method.endsWith(SUFFIX)
3  packages/xen-api/src/_isOpaqueRef.js  Normal file
@@ -0,0 +1,3 @@
const PREFIX = 'OpaqueRef:'

export default value => typeof value === 'string' && value.startsWith(PREFIX)
4  packages/xen-api/src/_isReadOnlyCall.js  Normal file
@@ -0,0 +1,4 @@
const RE = /^[^.]+\.get_/

export default (method, args) =>
  args.length === 1 && typeof args[0] === 'string' && RE.test(method)
8  packages/xen-api/src/_makeCallSetting.js  Normal file
@@ -0,0 +1,8 @@
export default (setting, defaultValue) =>
  setting === undefined
    ? () => defaultValue
    : typeof setting === 'function'
    ? setting
    : typeof setting === 'object'
    ? method => setting[method] ?? setting['*'] ?? defaultValue
    : () => setting
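How the accepted shapes resolve, per the cascade above (method names and numbers are illustrative):

import makeCallSetting from './_makeCallSetting'

const timeout = makeCallSetting({ 'VM.migrate_send': 3600e3, '*': 60e3 }, 30e3)
timeout('VM.migrate_send') // 3600000 (exact match)
timeout('VM.start') // 60000 (the '*' fallback)

makeCallSetting(undefined, 30e3)('VM.start') // 30000 (default value)
makeCallSetting(5e3, 30e3)('VM.start') // 5000 (constant setting)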
18  packages/xen-api/src/_parseUrl.js  Normal file
@@ -0,0 +1,18 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/

export default url => {
  const matches = URL_RE.exec(url)
  if (matches === null) {
    throw new Error('invalid URL: ' + url)
  }

  const [, protocol = 'https:', username, password, hostname, port] = matches
  const parsedUrl = { protocol, hostname, port }
  if (username !== undefined) {
    parsedUrl.username = decodeURIComponent(username)
  }
  if (password !== undefined) {
    parsedUrl.password = decodeURIComponent(password)
  }
  return parsedUrl
}
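A sketch of what the regular expression accepts (host names and credentials are illustrative; outputs follow from the code above):

import parseUrl from './_parseUrl'

parseUrl('xen1.example.org')
// { protocol: 'https:', hostname: 'xen1.example.org', port: undefined }

parseUrl('http://root:p%40ss@xen1.example.org:8080/')
// {
//   protocol: 'http:',
//   hostname: 'xen1.example.org',
//   port: '8080',
//   username: 'root',
//   password: 'p@ss',
// }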
@@ -9,6 +9,7 @@ import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'

import { createClient } from './'
@@ -25,6 +26,20 @@ function askPassword(prompt = 'Password: ') {
  })
}

const { getPrototypeOf, ownKeys } = Reflect
function getAllBoundDescriptors(object) {
  const descriptors = { __proto__: null }
  let current = object
  do {
    ownKeys(current).forEach(key => {
      if (!(key in descriptors)) {
        descriptors[key] = getBoundPropertyDescriptor(current, key, object)
      }
    })
  } while ((current = getPrototypeOf(current)) !== null)
  return descriptors
}

// ===================================================================

const usage = 'Usage: xen-api <url> [<user> [<password>]]'
@@ -78,11 +93,17 @@ const main = async args => {
  const repl = createRepl({
    prompt: `${xapi._humanId}> `,
  })
  repl.context.xapi = xapi

  repl.context.diff = (a, b) => console.log('%s', diff(a, b))
  repl.context.find = predicate => find(xapi.objects.all, predicate)
  repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
  {
    const ctx = repl.context
    ctx.xapi = xapi

    ctx.diff = (a, b) => console.log('%s', diff(a, b))
    ctx.find = predicate => find(xapi.objects.all, predicate)
    ctx.findAll = predicate => filter(xapi.objects.all, predicate)

    Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
  }

  // Make the REPL wait for promise completion.
  repl.eval = (evaluate => (cmd, context, filename, cb) => {
@@ -4,31 +4,33 @@ import { pDelay } from 'promise-toolbox'

import { createClient } from './'

const xapi = (() => {
  const [, , url, user, password] = process.argv

  return createClient({
    auth: { user, password },
async function main([url]) {
  const xapi = createClient({
    allowUnauthorized: true,
    url,
    watchEvents: false,
  })
})()
  await xapi.connect()

xapi
  .connect()

  // Get the pool record's ref.
  .then(() => xapi.call('pool.get_all'))

  // Injects lots of events.
  .then(([poolRef]) => {
    const loop = () =>
      pDelay
        .call(
          xapi.call('event.inject', 'pool', poolRef),
          10 // A small delay is required to avoid overloading the Xen API.
        )
        .then(loop)

    return loop()
  let loop = true
  process.on('SIGINT', () => {
    loop = false
  })

  const { pool } = xapi
  // eslint-disable-next-line no-unmodified-loop-condition
  while (loop) {
    await pool.update_other_config(
      'xo:injectEvents',
      Math.random()
        .toString(36)
        .slice(2)
    )
    await pDelay(1e2)
  }

  await pool.update_other_config('xo:injectEvents', null)
  await xapi.disconnect()
}

main(process.argv.slice(2)).catch(console.error)
3  packages/xen-api/src/transports/_UnsupportedTransport.js  Normal file
@@ -0,0 +1,3 @@
import makeError from 'make-error'

export default makeError('UnsupportedTransport')
25  packages/xen-api/src/transports/_prepareXmlRpcParams.js  Normal file
@@ -0,0 +1,25 @@
// Prepare values before passing them to the XenAPI:
//
// - cast integers to strings
export default function prepare(param) {
  if (Number.isInteger(param)) {
    return String(param)
  }

  if (typeof param !== 'object' || param === null) {
    return param
  }

  if (Array.isArray(param)) {
    return param.map(prepare)
  }

  const values = {}
  Object.keys(param).forEach(key => {
    const value = param[key]
    if (value !== undefined) {
      values[key] = prepare(value)
    }
  })
  return values
}
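A sketch of the conversion performed by this recursive helper (values are illustrative): integers become strings, floats and other primitives pass through, arrays and objects are walked recursively, and undefined object entries are dropped.

import prepare from './_prepareXmlRpcParams'

prepare([42, 'foo', { memory: 4294967296, nested: [1, 2] }, 3.14])
// ['42', 'foo', { memory: '4294967296', nested: ['1', '2'] }, 3.14]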
@@ -1,3 +0,0 @@
import makeError from 'make-error'

export const UnsupportedTransport = makeError('UnsupportedTransport')
@@ -1,7 +1,7 @@
import jsonRpc from './json-rpc'
import UnsupportedTransport from './_UnsupportedTransport'
import xmlRpc from './xml-rpc'
import xmlRpcJson from './xml-rpc-json'
import { UnsupportedTransport } from './_utils'

const factories = [jsonRpc, xmlRpcJson, xmlRpc]
const { length } = factories
@@ -1,7 +1,7 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'

import { UnsupportedTransport } from './_utils'
import UnsupportedTransport from './_UnsupportedTransport'

// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ allowUnauthorized, url }) => {
@@ -1,7 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import { UnsupportedTransport } from './_utils'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'

const logError = error => {
  if (error.res) {
@@ -71,10 +72,7 @@ const parseResult = result => {
    throw new UnsupportedTransport()
  }

export default ({
  allowUnauthorized,
  url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
  const client = (protocol === 'https:' ? createSecureClient : createClient)({
    host: hostname,
    path: '/json',
@@ -83,5 +81,6 @@ export default ({
  })
  const call = promisify(client.methodCall, client)

  return (method, args) => call(method, args).then(parseResult, logError)
  return (method, args) =>
    call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}
@@ -1,6 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import prepareXmlRpcParams from './_prepareXmlRpcParams'

const logError = error => {
  if (error.res) {
    console.error(
@@ -30,10 +32,7 @@ const parseResult = result => {
  return result.Value
}

export default ({
  allowUnauthorized,
  url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
  const client = (protocol === 'https:' ? createSecureClient : createClient)({
    host: hostname,
    port,
@@ -41,5 +40,6 @@ export default ({
  })
  const call = promisify(client.methodCall, client)

  return (method, args) => call(method, args).then(parseResult, logError)
  return (method, args) =>
    call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}
@@ -25,5 +25,8 @@
  },
  "dependencies": {
    "xo-common": "^0.2.0"
  },
  "scripts": {
    "postversion": "npm publish"
  }
}
@@ -34,7 +34,7 @@
    "chalk": "^2.2.0",
    "exec-promise": "^0.7.0",
    "fs-promise": "^2.0.3",
    "http-request-plus": "^0.7.2",
    "http-request-plus": "^0.8.0",
    "human-format": "^0.10.0",
    "l33teral": "^3.0.3",
    "lodash": "^4.17.4",
@@ -43,7 +43,7 @@
    "nice-pipe": "0.0.0",
    "pretty-ms": "^4.0.0",
    "progress-stream": "^2.0.0",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "pump": "^3.0.0",
    "pw": "^0.0.4",
    "strip-indent": "^2.0.0",
@@ -64,6 +64,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
@@ -43,6 +43,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }

@@ -1,7 +1,7 @@
 import kindOf from 'kindof'
 import { BaseError } from 'make-error'
 import { EventEmitter } from 'events'
-import { forEach } from 'lodash'
+import { forOwn } from 'lodash'

 import isEmpty from './is-empty'
 import isObject from './is-object'
@@ -10,6 +10,7 @@ import isObject from './is-object'

 const {
   create: createObject,
+  keys,
   prototype: { hasOwnProperty },
 } = Object

@@ -63,6 +64,16 @@ export class NoSuchItem extends BaseError {

 // -------------------------------------------------------------------

+const assertValidKey = key => {
+  if (!isValidKey(key)) {
+    throw new InvalidKey(key)
+  }
+}
+
+const isValidKey = key => typeof key === 'number' || typeof key === 'string'
+
+// -------------------------------------------------------------------
+
 export default class Collection extends EventEmitter {
   constructor() {
     super()
@@ -71,7 +82,7 @@ export default class Collection extends EventEmitter {
     this._buffering = 0
     this._indexes = createObject(null)
     this._indexedItems = createObject(null)
-    this._items = {} // createObject(null)
+    this._items = createObject(null)
     this._size = 0
   }

@@ -113,7 +124,7 @@ export default class Collection extends EventEmitter {
   }

   clear() {
-    forEach(this._items, (_, key) => this._remove(key))
+    keys(this._items).forEach(this._remove, this)
   }

   remove(keyOrObjectWithId) {
@@ -176,8 +187,7 @@ export default class Collection extends EventEmitter {
       return defaultValue
     }

-    // Throws a NoSuchItem.
-    this._assertHas(key)
+    throw new NoSuchItem(key)
   }

   has(key) {
@@ -189,7 +199,7 @@ export default class Collection extends EventEmitter {
   // -----------------------------------------------------------------

   createIndex(name, index) {
-    const { _indexes: indexes } = this
+    const indexes = this._indexes
     if (hasOwnProperty.call(indexes, name)) {
       throw new DuplicateIndex(name)
     }
@@ -201,7 +211,7 @@ export default class Collection extends EventEmitter {
   }

   deleteIndex(name) {
-    const { _indexes: indexes } = this
+    const indexes = this._indexes
     if (!hasOwnProperty.call(indexes, name)) {
       throw new NoSuchIndex(name)
     }
@@ -218,7 +228,7 @@ export default class Collection extends EventEmitter {
   // -----------------------------------------------------------------

   *[Symbol.iterator]() {
-    const { _items: items } = this
+    const items = this._items

     for (const key in items) {
       yield [key, items[key]]
@@ -226,7 +236,7 @@ export default class Collection extends EventEmitter {
   }

   *keys() {
-    const { _items: items } = this
+    const items = this._items

     for (const key in items) {
       yield key
@@ -234,7 +244,7 @@ export default class Collection extends EventEmitter {
   }

   *values() {
-    const { _items: items } = this
+    const items = this._items

     for (const key in items) {
       yield items[key]
@@ -255,11 +265,11 @@ export default class Collection extends EventEmitter {
       }
       called = true

-      if (--this._buffering) {
+      if (--this._buffering !== 0) {
         return
       }

-      const { _buffer: buffer } = this
+      const buffer = this._buffer

       // Due to deduplication there could be nothing in the buffer.
       if (isEmpty(buffer)) {
@@ -276,7 +286,7 @@ export default class Collection extends EventEmitter {
         data[buffer[key]][key] = this._items[key]
       }

-      forEach(data, (items, action) => {
+      forOwn(data, (items, action) => {
        if (!isEmpty(items)) {
           this.emit(action, items)
         }
@@ -306,16 +316,6 @@ export default class Collection extends EventEmitter {
     }
   }

-  _assertValidKey(key) {
-    if (!this._isValidKey(key)) {
-      throw new InvalidKey(key)
-    }
-  }
-
-  _isValidKey(key) {
-    return typeof key === 'number' || typeof key === 'string'
-  }
-
   _remove(key) {
     delete this._items[key]
     this._size--
@@ -324,17 +324,17 @@ export default class Collection extends EventEmitter {

   _resolveItem(keyOrObjectWithId, valueIfKey = undefined) {
     if (valueIfKey !== undefined) {
-      this._assertValidKey(keyOrObjectWithId)
+      assertValidKey(keyOrObjectWithId)

       return [keyOrObjectWithId, valueIfKey]
     }

-    if (this._isValidKey(keyOrObjectWithId)) {
+    if (isValidKey(keyOrObjectWithId)) {
       return [keyOrObjectWithId]
     }

     const key = this.getKey(keyOrObjectWithId)
-    this._assertValidKey(key)
+    assertValidKey(key)

     return [key, keyOrObjectWithId]
   }
@@ -347,7 +347,7 @@ export default class Collection extends EventEmitter {
     }

     if (action === ACTION_ADD) {
-      this._buffer[key] = this._buffer[key] ? ACTION_UPDATE : ACTION_ADD
+      this._buffer[key] = key in this._buffer ? ACTION_UPDATE : ACTION_ADD
     } else if (action === ACTION_REMOVE) {
       if (this._buffer[key] === ACTION_ADD) {
         delete this._buffer[key]
@@ -356,7 +356,7 @@ export default class Collection extends EventEmitter {
       }
     } else {
       // update
-      if (!this._buffer[key]) {
+      if (!(key in this._buffer)) {
         this._buffer[key] = ACTION_UPDATE
       }
     }

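Reviewer note: the switch from truthiness tests to `key in this._buffer` (and from `{}` back to `createObject(null)`) is about falsy-but-valid values: the `key in` test stays correct even when the stored action constant is falsy (e.g. 0). A hedged usage sketch of the buffered-event behaviour this class implements, with method names assumed from the hunks above:

```js
// Hypothetical usage of Collection: while buffering, an `add` immediately
// followed by an `update` of the same key must still flush as a single
// `add` event; the deduplication in _touch is what folds them together.
const vms = new Collection()
vms.getKey = vm => vm.id

vms.on('add', items => console.log('add', Object.keys(items)))
vms.on('update', items => console.log('update', Object.keys(items)))

const flush = vms.bufferEvents()
vms.add({ id: 'vm1', power_state: 'Halted' })
vms.update({ id: 'vm1', power_state: 'Running' })
flush() // -> add [ 'vm1' ]   (the update was folded into the pending add)
```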
@@ -45,6 +45,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }

@@ -32,7 +32,7 @@
     "dist/"
   ],
   "engines": {
-    "node": ">=4"
+    "node": ">=6"
   },
   "dependencies": {
     "csv-parser": "^2.1.0",
@@ -43,7 +43,7 @@
     "xo-lib": "^0.9.0"
   },
   "devDependencies": {
-    "@types/node": "^10.12.2",
+    "@types/node": "^12.0.2",
     "@types/through2": "^2.0.31",
     "tslint": "^5.9.1",
     "tslint-config-standard": "^8.0.1",
@@ -55,6 +55,7 @@
     "lint": "tslint 'src/*.ts'",
     "posttest": "yarn run lint",
     "prepublishOnly": "yarn run build",
-    "start": "node dist/index.js"
+    "start": "node dist/index.js",
+    "postversion": "npm publish"
   }
 }

@@ -1,8 +1,3 @@
-declare module 'csv-parser' {
-  function csvParser(opts?: Object): any
-  export = csvParser
-}
-
 declare module 'exec-promise' {
   function execPromise(cb: (args: string[]) => any): void
   export = execPromise

@@ -32,7 +32,7 @@
     "node": ">=6"
   },
   "dependencies": {
-    "jsonrpc-websocket-client": "^0.4.1",
+    "jsonrpc-websocket-client": "^0.5.0",
     "lodash": "^4.17.2",
     "make-error": "^1.0.4"
   },
@@ -49,6 +49,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepublishOnly": "yarn run build",
+    "postversion": "npm publish"
   }
 }

@@ -41,6 +41,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepare": "yarn run build"
+    "prepare": "yarn run build",
+    "postversion": "npm publish"
   }
 }

@@ -41,5 +41,6 @@
     "build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
     "dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-auth-google",
-  "version": "0.2.0",
+  "version": "0.2.1",
   "license": "AGPL-3.0",
   "description": "Google authentication plugin for XO-Server",
   "keywords": [
@@ -33,7 +33,7 @@
     "node": ">=6"
   },
   "dependencies": {
-    "passport-google-oauth20": "^1.0.0"
+    "passport-google-oauth20": "^2.0.0"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -49,5 +49,6 @@
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -39,7 +39,7 @@
     "inquirer": "^6.0.0",
     "ldapjs": "^1.0.1",
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.11.0"
+    "promise-toolbox": "^0.12.1"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -55,5 +55,6 @@
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -50,5 +50,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-backup-reports",
-  "version": "0.15.0",
+  "version": "0.16.1",
   "license": "AGPL-3.0",
   "description": "Backup reports plugin for XO-Server",
   "keywords": [
@@ -36,6 +36,7 @@
     "node": ">=6"
   },
   "dependencies": {
+    "@xen-orchestra/log": "^0.1.4",
     "human-format": "^0.10.0",
     "lodash": "^4.13.1",
     "moment-timezone": "^0.5.13"
@@ -43,6 +44,8 @@
   "devDependencies": {
     "@babel/cli": "^7.0.0",
     "@babel/core": "^7.0.0",
+    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
+    "@babel/plugin-proposal-optional-chaining": "^7.2.0",
     "@babel/preset-env": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.3",
@@ -55,5 +58,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -1,8 +1,11 @@
+import createLogger from '@xen-orchestra/log'
 import humanFormat from 'human-format'
 import moment from 'moment-timezone'
-import { forEach, get, startCase } from 'lodash'
+import { forEach, groupBy, startCase } from 'lodash'
 import pkg from '../package'

+const logger = createLogger('xo:xo-server-backup-reports')
+
 export const configurationSchema = {
   type: 'object',

@@ -46,6 +49,9 @@ export const testSchema = {

 // ===================================================================

+const INDENT = '    '
+const UNKNOWN_ITEM = 'Unknown'
+
 const ICON_FAILURE = '🚨'
 const ICON_INTERRUPTED = '⚠️'
 const ICON_SKIPPED = '⏩'
@@ -60,7 +66,7 @@ const STATUS_ICON = {
 }

 const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
-const createDateFormater = timezone =>
+const createDateFormatter = timezone =>
   timezone !== undefined
     ? timestamp =>
         moment(timestamp)
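Typo fix aside (`createDateFormater` becomes `createDateFormatter`), the helper's behaviour is worth spelling out; a small sketch assuming moment-timezone semantics:

```js
// With a timezone, timestamps are rendered in that zone; without one,
// moment falls back to the machine's local zone.
const formatDate = createDateFormatter('Europe/Paris')
formatDate(0) // 'Thursday, January 1st 1970, 1:00:00 am' (UTC+1 in winter)
```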
@@ -86,10 +92,6 @@ const formatSpeed = (bytes, milliseconds) =>
       })
     : 'N/A'

-const logError = e => {
-  console.error('backup report error:', e)
-}
-
 const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
 const NO_SUCH_OBJECT_ERROR = 'no such object'
 const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
@@ -100,40 +102,114 @@ const isSkippedError = error =>
   error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
   error.message === NO_SUCH_OBJECT_ERROR

-const INDENT = '  '
-const createGetTemporalDataMarkdown = formatDate => (
-  start,
-  end,
-  nbIndent = 0
-) => {
-  const indent = INDENT.repeat(nbIndent)
-
-  const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
+// ===================================================================
+
+const STATUS = ['failure', 'interrupted', 'skipped', 'success']
+const TITLE_BY_STATUS = {
+  failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
+  interrupted: n => `## ${n} Interrupted`,
+  skipped: n => `## ${n} Skipped`,
+  success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
+}
+
+const getTemporalDataMarkdown = (end, start, formatDate) => {
+  const markdown = [`- **Start time**: ${formatDate(start)}`]
   if (end !== undefined) {
-    markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
+    markdown.push(`- **End time**: ${formatDate(end)}`)
     const duration = end - start
     if (duration >= 1) {
-      markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
+      markdown.push(`- **Duration**: ${formatDuration(duration)}`)
     }
   }
   return markdown
 }

-const addWarnings = (text, warnings, nbIndent = 0) => {
-  if (warnings === undefined) {
+const getWarningsMarkdown = (warnings = []) =>
+  warnings.map(({ message }) => `- **${ICON_WARNING} ${message}**`)
+
+const getErrorMarkdown = task => {
+  let message
+  if (
+    task.status === 'success' ||
+    (message = task.result?.message ?? task.result?.code) === undefined
+  ) {
     return
   }

-  const indent = INDENT.repeat(nbIndent)
-  warnings.forEach(({ message }) => {
-    text.push(`${indent}- **${ICON_WARNING} ${message}**`)
-  })
+  const label = task.status === 'skipped' ? 'Reason' : 'Error'
+  return `- **${label}**: ${message}`
 }

+const MARKDOWN_BY_TYPE = {
+  pool(task, { formatDate }) {
+    const { pool, poolMaster = {} } = task.data
+    const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
+
+    return {
+      body: [
+        `- **UUID**: ${pool.uuid}`,
+        ...getTemporalDataMarkdown(task.end, task.start, formatDate),
+        getErrorMarkdown(task),
+      ],
+      title: `[pool] ${name}`,
+    }
+  },
+  xo(task, { formatDate, jobName }) {
+    return {
+      body: [
+        ...getTemporalDataMarkdown(task.end, task.start, formatDate),
+        getErrorMarkdown(task),
+      ],
+      title: `[XO] ${jobName}`,
+    }
+  },
+  async remote(task, { formatDate, xo }) {
+    const id = task.data.id
+    const name = await xo.getRemote(id).then(
+      ({ name }) => name,
+      error => {
+        logger.warn(error)
+        return UNKNOWN_ITEM
+      }
+    )
+    return {
+      body: [
+        `- **ID**: ${id}`,
+        ...getTemporalDataMarkdown(task.end, task.start, formatDate),
+        getErrorMarkdown(task),
+      ],
+      title: `[remote] ${name}`,
+    }
+  },
+}
+
+const getMarkdown = (task, props) =>
+  MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
+
+const toMarkdown = parts => {
+  const lines = []
+  // starts at -1 so that entries of the top-level array are not indented
+  let indentLevel = -1
+
+  const helper = part => {
+    if (typeof part === 'string') {
+      lines.push(`${INDENT.repeat(indentLevel)}${part}`)
+    } else if (Array.isArray(part)) {
+      ++indentLevel
+      part.forEach(helper)
+      --indentLevel
+    }
+  }
+  helper(parts)
+
+  return lines.join('\n')
+}
+
 // ===================================================================

 class BackupReportsXoPlugin {
   constructor(xo) {
     this._xo = xo
-    this._report = this._wrapper.bind(this)
+    this._report = this._report.bind(this)
   }

   configure({ toMails, toXmpp }) {
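The new `toMarkdown` helper is the core of this refactor: indentation is no longer threaded through every call as `nbIndent`, it is encoded as array nesting, and falsy entries (from `cond && line` patterns) are skipped because the walker only handles strings and arrays. A worked example with hypothetical data:

```js
const report = toMarkdown([
  '## 1 Failure',
  '',
  '### my-vm',
  [
    '- **UUID**: 1234',
    false && '- **Size**: 0 B', // skipped: neither string nor array
    ['- **Error**: ENOENT'],
  ],
])
// ## 1 Failure
//
// ### my-vm
//     - **UUID**: 1234
//         - **Error**: ENOENT
```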
@@ -146,72 +222,174 @@ class BackupReportsXoPlugin {
   }

   test({ runId }) {
-    return this._backupNgListener(undefined, undefined, undefined, runId)
+    return this._report(runId, undefined, true)
   }

   unload() {
     this._xo.removeListener('job:terminated', this._report)
   }

-  _wrapper(status, job, schedule, runJobId) {
-    if (job.type === 'metadataBackup') {
-      return
-    }
+  async _report(runJobId, { type, status } = {}, force) {
+    const xo = this._xo
+    try {
+      if (type === 'call') {
+        return this._legacyVmHandler(status)
+      }

-    return new Promise(resolve =>
-      resolve(
-        job.type === 'backup'
-          ? this._backupNgListener(status, job, schedule, runJobId)
-          : this._listener(status, job, schedule, runJobId)
-      )
-    ).catch(logError)
+      const log = await xo.getBackupNgLogs(runJobId)
+      if (log === undefined) {
+        throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
+      }
+
+      const reportWhen = log.data.reportWhen
+      if (
+        !force &&
+        (reportWhen === 'never' ||
+          // Handle improper value introduced by:
+          // https://github.com/vatesfr/xen-orchestra/commit/753ee994f2948bbaca9d3161eaab82329a682773#diff-9c044ab8a42ed6576ea927a64c1ec3ebR105
+          reportWhen === 'Never' ||
+          (reportWhen === 'failure' && log.status === 'success'))
+      ) {
+        return
+      }
+
+      const [job, schedule] = await Promise.all([
+        await xo.getJob(log.jobId),
+        await xo.getSchedule(log.scheduleId).catch(error => {
+          logger.warn(error)
+        }),
+      ])
+
+      if (job.type === 'backup') {
+        return this._ngVmHandler(log, job, schedule, force)
+      } else if (job.type === 'metadataBackup') {
+        return this._metadataHandler(log, job, schedule, force)
+      }
+
+      throw new Error(`Unknown backup job type: ${job.type}`)
+    } catch (error) {
+      logger.warn(error)
+    }
+  }

-  async _backupNgListener(_1, _2, schedule, runJobId) {
+  async _metadataHandler(log, { name: jobName }, schedule, force) {
     const xo = this._xo
-    const log = await xo.getBackupNgLogs(runJobId)
-    if (log === undefined) {
-      throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
-    }
+
+    const formatDate = createDateFormatter(schedule?.timezone)
+
+    const tasksByStatus = groupBy(log.tasks, 'status')
+    const n = log.tasks?.length ?? 0
+    const nSuccesses = tasksByStatus.success?.length ?? 0
+
+    if (!force && log.data.reportWhen === 'failure') {
+      delete tasksByStatus.success
+    }
+
+    // header
+    const markdown = [
+      `## Global status: ${log.status}`,
+      '',
+      `- **Job ID**: ${log.jobId}`,
+      `- **Job name**: ${jobName}`,
+      `- **Run ID**: ${log.id}`,
+      ...getTemporalDataMarkdown(log.end, log.start, formatDate),
+      n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
+      ...getWarningsMarkdown(log.warnings),
+      getErrorMarkdown(log),
+    ]
+
+    const nagiosText = []
+
+    // body
+    for (const status of STATUS) {
+      const tasks = tasksByStatus[status]
+      if (tasks === undefined) {
+        continue
+      }
+
+      // tasks header
+      markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
+
+      // tasks body
+      for (const task of tasks) {
+        const taskMarkdown = await getMarkdown(task, {
+          formatDate,
+          jobName: log.jobName,
+        })
+        if (taskMarkdown === undefined) {
+          continue
+        }
+
+        const { title, body } = taskMarkdown
+        const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
+
+        if (task.status !== 'success') {
+          nagiosText.push(`[${task.status}] ${title}`)
+        }
+
+        for (const subTask of task.tasks ?? []) {
+          const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
+          if (taskMarkdown === undefined) {
+            continue
+          }
+
+          const icon = STATUS_ICON[subTask.status]
+          const { title, body } = taskMarkdown
+          subMarkdown.push([
+            `- **${title}** ${icon}`,
+            [...body, ...getWarningsMarkdown(subTask.warnings)],
+          ])
+        }
+        markdown.push('', '', `### ${title}`, ...subMarkdown)
+      }
+    }
+
+    // footer
+    markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
+
+    return this._sendReport({
+      subject: `[Xen Orchestra] ${log.status} − Metadata backup report for ${
+        log.jobName
+      } ${STATUS_ICON[log.status]}`,
+      markdown: toMarkdown(markdown),
+      nagiosStatus: log.status === 'success' ? 0 : 2,
+      nagiosMarkdown:
+        log.status === 'success'
+          ? `[Xen Orchestra] [Success] Metadata backup report for ${
+              log.jobName
+            }`
+          : `[Xen Orchestra] [${log.status}] Metadata backup report for ${
+              log.jobName
+            } - ${nagiosText.join(' ')}`,
+    })
+  }

+  async _ngVmHandler(log, { name: jobName }, schedule, force) {
     const xo = this._xo

     const { reportWhen, mode } = log.data || {}
-    if (
-      reportWhen === 'never' ||
-      (log.status === 'success' && reportWhen === 'failure')
-    ) {
-      return
-    }

-    if (schedule === undefined) {
-      schedule = await xo.getSchedule(log.scheduleId)
-    }
+    const formatDate = createDateFormatter(schedule?.timezone)

-    const jobName = (await xo.getJob(log.jobId, 'backup')).name
-    const formatDate = createDateFormater(schedule.timezone)
-    const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
-
-    if (
-      (log.status === 'failure' || log.status === 'skipped') &&
-      log.result !== undefined
-    ) {
-      let markdown = [
+    if (log.tasks === undefined) {
+      const markdown = [
         `## Global status: ${log.status}`,
         '',
         `- **Job ID**: ${log.jobId}`,
-        `- **Run ID**: ${runJobId}`,
+        `- **Run ID**: ${log.id}`,
         `- **mode**: ${mode}`,
-        ...getTemporalDataMarkdown(log.start, log.end),
-        `- **Error**: ${log.result.message}`,
+        ...getTemporalDataMarkdown(log.end, log.start, formatDate),
+        getErrorMarkdown(log),
+        ...getWarningsMarkdown(log.warnings),
+        '---',
+        '',
+        `*${pkg.name} v${pkg.version}*`,
       ]
-      addWarnings(markdown, log.warnings)
-      markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)

-      markdown = markdown.join('\n')
       return this._sendReport({
+        subject: `[Xen Orchestra] ${
+          log.status
+        } − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
-        markdown,
+        markdown: toMarkdown(markdown),
         nagiosStatus: 2,
         nagiosMarkdown: `[Xen Orchestra] [${
           log.status
@@ -231,7 +409,7 @@ class BackupReportsXoPlugin {
     let nSkipped = 0
     let nInterrupted = 0
     for (const taskLog of log.tasks) {
-      if (taskLog.status === 'success' && reportWhen === 'failure') {
+      if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
         continue
       }

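Reviewer note: `_report` now owns the single decision of whether any report is sent, and `force` (set by the plugin's test button) bypasses it both globally and, as in the hunk above, per task. The gate, extracted as a standalone predicate (a sketch of the same logic, including the tolerated legacy `'Never'` value):

```js
// force always wins; otherwise 'never'/'Never' suppress everything and
// 'failure' suppresses reports of successful runs.
const shouldSend = ({ reportWhen, status, force }) =>
  force ||
  (reportWhen !== 'never' &&
    reportWhen !== 'Never' &&
    !(reportWhen === 'failure' && status === 'success'))
```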
@@ -244,16 +422,16 @@ class BackupReportsXoPlugin {
         `### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
         '',
         `- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
-        ...getTemporalDataMarkdown(taskLog.start, taskLog.end),
+        ...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
+        ...getWarningsMarkdown(taskLog.warnings),
       ]
-      addWarnings(text, taskLog.warnings)

       const failedSubTasks = []
       const snapshotText = []
       const srsText = []
       const remotesText = []

-      for (const subTaskLog of taskLog.tasks || []) {
+      for (const subTaskLog of taskLog.tasks ?? []) {
         if (
           subTaskLog.message !== 'export' &&
           subTaskLog.message !== 'snapshot'
@@ -262,29 +440,36 @@ class BackupReportsXoPlugin {
         }

         const icon = STATUS_ICON[subTaskLog.status]
-        const errorMessage = `    - **Error**: ${get(
-          subTaskLog.result,
-          'message'
-        )}`
+        const type = subTaskLog.data?.type
+        const errorMarkdown = getErrorMarkdown(subTaskLog)

         if (subTaskLog.message === 'snapshot') {
-          snapshotText.push(
-            `- **Snapshot** ${icon}`,
-            ...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
-          )
-        } else if (subTaskLog.data.type === 'remote') {
+          snapshotText.push(`- **Snapshot** ${icon}`, [
+            ...getTemporalDataMarkdown(
+              subTaskLog.end,
+              subTaskLog.start,
+              formatDate
+            ),
+          ])
+        } else if (type === 'remote') {
           const id = subTaskLog.data.id
-          const remote = await xo.getRemote(id).catch(() => {})
-          remotesText.push(
-            `  - **${
-              remote !== undefined ? remote.name : `Remote Not found`
-            }** (${id}) ${icon}`,
-            ...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
-          )
-          addWarnings(remotesText, subTaskLog.warnings, 2)
+          const remote = await xo.getRemote(id).catch(error => {
+            logger.warn(error)
+          })
+          const title = remote !== undefined ? remote.name : `Remote Not found`
+
+          remotesText.push(`- **${title}** (${id}) ${icon}`, [
+            ...getTemporalDataMarkdown(
+              subTaskLog.end,
+              subTaskLog.start,
+              formatDate
+            ),
+            ...getWarningsMarkdown(subTaskLog.warnings),
+            errorMarkdown,
+          ])

           if (subTaskLog.status === 'failure') {
             failedSubTasks.push(remote !== undefined ? remote.name : id)
-            remotesText.push('', errorMessage)
           }
         } else {
           const id = subTaskLog.data.id
@@ -294,14 +479,17 @@ class BackupReportsXoPlugin {
           } catch (e) {}
           const [srName, srUuid] =
             sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
-          srsText.push(
-            `  - **${srName}** (${srUuid}) ${icon}`,
-            ...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
-          )
-          addWarnings(srsText, subTaskLog.warnings, 2)
+          srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
+            ...getTemporalDataMarkdown(
+              subTaskLog.end,
+              subTaskLog.start,
+              formatDate
+            ),
+            ...getWarningsMarkdown(subTaskLog.warnings),
+            errorMarkdown,
+          ])
           if (subTaskLog.status === 'failure') {
             failedSubTasks.push(sr !== undefined ? sr.name_label : id)
-            srsText.push('', errorMessage)
           }
         }

@@ -313,53 +501,48 @@ class BackupReportsXoPlugin {
            return
          }

-          const operationInfoText = []
-          addWarnings(operationInfoText, operationLog.warnings, 3)
-          if (operationLog.status === 'success') {
-            const size = operationLog.result.size
+          const size = operationLog.result?.size
+          if (size > 0) {
             if (operationLog.message === 'merge') {
               globalMergeSize += size
             } else {
               globalTransferSize += size
             }
+          }
-
-            operationInfoText.push(
-              `    - **Size**: ${formatSize(size)}`,
-              `    - **Speed**: ${formatSpeed(
-                size,
-                operationLog.end - operationLog.start
-              )}`
-            )
-          } else if (get(operationLog.result, 'message') !== undefined) {
-            operationInfoText.push(
-              `    - **Error**: ${get(operationLog.result, 'message')}`
-            )
-          }
          const operationText = [
-            `  - **${operationLog.message}** ${
-              STATUS_ICON[operationLog.status]
-            }`,
-            ...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
-            ...operationInfoText,
-          ].join('\n')
-          if (get(subTaskLog, 'data.type') === 'remote') {
+            `- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
+            [
+              ...getTemporalDataMarkdown(
+                operationLog.end,
+                operationLog.start,
+                formatDate
+              ),
+              size > 0 && `- **Size**: ${formatSize(size)}`,
+              size > 0 &&
+                `- **Speed**: ${formatSpeed(
+                  size,
+                  operationLog.end - operationLog.start
+                )}`,
+              ...getWarningsMarkdown(operationLog.warnings),
+              getErrorMarkdown(operationLog),
+            ],
+          ]
+          if (type === 'remote') {
            remotesText.push(operationText)
-            remotesText.join('\n')
-          }
-          if (get(subTaskLog, 'data.type') === 'SR') {
+          } else if (type === 'SR') {
            srsText.push(operationText)
-            srsText.join('\n')
          }
        })
      }

-      if (srsText.length !== 0) {
-        srsText.unshift(`- **SRs**`)
-      }
-      if (remotesText.length !== 0) {
-        remotesText.unshift(`- **Remotes**`)
-      }
-      const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
+      const subText = [
+        ...snapshotText,
+        srsText.length !== 0 && `- **SRs**`,
+        srsText,
+        remotesText.length !== 0 && `- **Remotes**`,
+        remotesText,
+      ]
      if (taskLog.result !== undefined) {
        if (taskLog.status === 'skipped') {
          ++nSkipped
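Same pattern as the header arrays: the section titles become conditional entries rather than `unshift` calls, and `toMarkdown` quietly drops whatever evaluates to `false` or to an empty array. A concrete instance with hypothetical data:

```js
const snapshotText = ['- **Snapshot** ⏩', ['- **Start time**: 10:00:00 am']]
const srsText = [] // no SR exports this run
const remotesText = ['- **nfs1** (abc) 🚨', ['- **Error**: ENOENT']]

const subText = [
  ...snapshotText,
  srsText.length !== 0 && '- **SRs**', // false -> dropped by toMarkdown
  srsText, // empty array -> contributes nothing
  remotesText.length !== 0 && '- **Remotes**',
  remotesText,
]
```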
@@ -369,8 +552,7 @@ class BackupReportsXoPlugin {
            taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
              ? UNHEALTHY_VDI_CHAIN_MESSAGE
              : taskLog.result.message
-          }`,
-          ''
+          }`
        )
        nagiosText.push(
          `[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -379,11 +561,7 @@ class BackupReportsXoPlugin {
        )
      } else {
        ++nFailures
-        failedVmsText.push(
-          ...text,
-          `- **Error**: ${taskLog.result.message}`,
-          ''
-        )
+        failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)

        nagiosText.push(
          `[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -394,7 +572,7 @@ class BackupReportsXoPlugin {
      } else {
        if (taskLog.status === 'failure') {
          ++nFailures
-          failedVmsText.push(...text, '', '', ...subText, '')
+          failedVmsText.push(...text, ...subText)
          nagiosText.push(
            `[${
              vm !== undefined ? vm.name_label : 'undefined'
@@ -402,37 +580,34 @@ class BackupReportsXoPlugin {
          )
        } else if (taskLog.status === 'interrupted') {
          ++nInterrupted
-          interruptedVmsText.push(...text, '', '', ...subText, '')
+          interruptedVmsText.push(...text, ...subText)
          nagiosText.push(
            `[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
          )
        } else {
-          successfulVmsText.push(...text, '', '', ...subText, '')
+          successfulVmsText.push(...text, ...subText)
        }
      }
    }

    const nVms = log.tasks.length
    const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
-    let markdown = [
+    const markdown = [
      `## Global status: ${log.status}`,
      '',
      `- **Job ID**: ${log.jobId}`,
-      `- **Run ID**: ${runJobId}`,
+      `- **Run ID**: ${log.id}`,
      `- **mode**: ${mode}`,
-      ...getTemporalDataMarkdown(log.start, log.end),
+      ...getTemporalDataMarkdown(log.end, log.start, formatDate),
      `- **Successes**: ${nSuccesses} / ${nVms}`,
+      globalTransferSize !== 0 &&
+        `- **Transfer size**: ${formatSize(globalTransferSize)}`,
+      globalMergeSize !== 0 &&
+        `- **Merge size**: ${formatSize(globalMergeSize)}`,
+      ...getWarningsMarkdown(log.warnings),
      '',
    ]

-    if (globalTransferSize !== 0) {
-      markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
-    }
-    if (globalMergeSize !== 0) {
-      markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
-    }
-    addWarnings(markdown, log.warnings)
-    markdown.push('')

    if (nFailures !== 0) {
      markdown.push(
        '---',
@@ -457,7 +632,7 @@ class BackupReportsXoPlugin {
      )
    }

-    if (nSuccesses !== 0 && reportWhen !== 'failure') {
+    if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
      markdown.push(
        '---',
        '',
@@ -468,9 +643,8 @@ class BackupReportsXoPlugin {
    }

    markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
-    markdown = markdown.join('\n')
    return this._sendReport({
-      markdown,
+      markdown: toMarkdown(markdown),
      subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
        STATUS_ICON[log.status]
      }`,
@@ -510,9 +684,9 @@ class BackupReportsXoPlugin {
    ])
  }

-  _listener(status) {
+  _legacyVmHandler(status) {
    const { calls, timezone, error } = status
-    const formatDate = createDateFormater(timezone)
+    const formatDate = createDateFormatter(timezone)

    if (status.error !== undefined) {
      const [globalStatus, icon] =

@@ -32,8 +32,8 @@
     "node": ">=6"
   },
   "dependencies": {
-    "http-request-plus": "^0.7.2",
-    "jsonrpc-websocket-client": "^0.4.1"
+    "http-request-plus": "^0.8.0",
+    "jsonrpc-websocket-client": "^0.5.0"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -49,5 +49,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -44,5 +44,6 @@
     "build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
     "dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -42,5 +42,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -32,9 +32,9 @@
     "node": ">=6"
   },
   "dependencies": {
-    "nodemailer": "^5.0.0",
+    "nodemailer": "^6.1.0",
     "nodemailer-markdown": "^1.0.1",
-    "promise-toolbox": "^0.11.0"
+    "promise-toolbox": "^0.12.1"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -50,5 +50,6 @@
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -49,5 +49,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -33,7 +33,7 @@
     "node": ">=6"
   },
   "dependencies": {
-    "promise-toolbox": "^0.11.0",
+    "promise-toolbox": "^0.12.1",
     "slack-node": "^0.1.8"
   },
   "devDependencies": {
@@ -50,5 +50,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -50,5 +50,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-usage-report",
-  "version": "0.7.1",
+  "version": "0.7.2",
   "license": "AGPL-3.0",
   "description": "",
   "keywords": [
@@ -39,10 +39,10 @@
     "@xen-orchestra/cron": "^1.0.3",
     "@xen-orchestra/log": "^0.1.4",
     "handlebars": "^4.0.6",
-    "html-minifier": "^3.5.8",
+    "html-minifier": "^4.0.0",
     "human-format": "^0.10.0",
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.11.0"
+    "promise-toolbox": "^0.12.1"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -59,5 +59,6 @@
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
-  }
+  },
+  "private": true
 }

@@ -494,7 +494,7 @@ async function getHostsMissingPatches({ runningHosts, xo }) {
     map(runningHosts, async host => {
       let hostsPatches = await xo
         .getXapi(host)
-        .listMissingPoolPatchesOnHost(host._xapiId)
+        .listMissingPatches(host._xapiId)
         .catch(error => {
           console.error(
             '[WARN] error on fetching hosts missing patches:',

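This rename tracks an xo-server XAPI mixin change; the calling pattern (a per-host fetch with tolerated failure) is worth noting on its own. A hedged sketch of the surrounding helper, with assumed shapes for `xo` and `host`:

```js
// Sketch: one unreachable host should not sink the whole usage report,
// so each per-host fetch falls back to an empty patch list.
const getHostsMissingPatches = ({ runningHosts, xo }) =>
  Promise.all(
    runningHosts.map(async host => {
      const patches = await xo
        .getXapi(host)
        .listMissingPatches(host._xapiId)
        .catch(error => {
          console.error('[WARN] error on fetching hosts missing patches:', error)
          return []
        })
      return { host, nMissingPatches: patches.length }
    })
  )
```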
@@ -4,6 +4,11 @@

 // ===================================================================

+// https://expressjs.com/en/advanced/best-practice-performance.html#set-node_env-to-production
+if (process.env.NODE_ENV === undefined) {
+  process.env.NODE_ENV = 'production'
+}
+
 // Better stack traces if possible.
 require('../better-stacks')
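Setting the default before anything else is loaded matters because libraries read `NODE_ENV` once at require time. A quick illustration (sketch) of the observable effect:

```js
// Run with no NODE_ENV in the environment: express picks up the default
// applied by the bin script above and enables its production behaviour
// (e.g. view caching).
import express from 'express'

const app = express()
console.log(app.get('env')) // 'production'
```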