Compare commits
1 commit
feat_check ... lite/human

| Author | SHA1 | Date |
|---|---|---|
|  | 2243805d01 |  |
@@ -28,7 +28,7 @@ module.exports = {
      },
    },
    {
      files: ['*.{integ,spec,test}.{,c,m}js'],
      files: ['*.{spec,test}.{,c,m}js'],
      rules: {
        'n/no-unpublished-require': 'off',
        'n/no-unpublished-import': 'off',
@@ -1,11 +1,8 @@
'use strict'

module.exports = {
  arrowParens: 'avoid',
  jsxSingleQuote: true,
  semi: false,
  singleQuote: true,
  trailingComma: 'es5',

  // 2020-11-24: Requested by nraynaud and approved by the rest of the team
  //
@@ -1,32 +0,0 @@
```js
import diff from '@vates/diff'

diff('foo bar baz', 'Foo qux')
// → [ 0, 'F', 4, 'qux', 7, '' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `F`
// - at position 4, it contains `qux`
// - at position 7, it ends

diff('Foo qux', 'foo bar baz')
// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `f`
// - at position 4, it contains `bar`
// - at position 7, it contains `baz`

// works with all collections that support
// - `.length`
// - `collection[index]`
// - `.slice(start, end)`
//
// which includes:
// - arrays
// - strings
// - `Buffer`
// - `TypedArray`
diff([0, 1, 2], [3, 4])
// → [ 0, [ 3, 4 ], 2, [] ]
```
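Editor's note: the flat `[index, chunk, …]` format above is enough to rebuild the second operand from the first, provided the caller also knows the target length — the trailing pair is positional, so an empty chunk at the end is ambiguous on its own. A minimal sketch for strings; `applyDiff` is a hypothetical helper, not part of the package:

```js
// Hypothetical helper: rebuilds data2 from data1 and a patch produced by
// @vates/diff. `targetLength` is required because a trailing empty chunk
// only marks where data2 ends; it is not a replacement.
function applyDiff(data1, patch, targetLength) {
  let out = ''
  let pos = 0
  for (let k = 0; k < patch.length; k += 2) {
    const index = patch[k]
    const chunk = patch[k + 1]
    out += data1.slice(pos, index) + chunk // unchanged run, then the difference
    pos = index + chunk.length
  }
  return out + data1.slice(pos, targetLength) // unchanged tail, if any
}

applyDiff('foo bar baz', [0, 'F', 4, 'qux', 7, ''], 7) // → 'Foo qux'
applyDiff('Foo qux', [0, 'f', 4, 'bar', 7, ' baz'], 11) // → 'foo bar baz'
```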
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,65 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/diff

[](https://npmjs.org/package/@vates/diff)  [](https://bundlephobia.com/result?p=@vates/diff) [](https://npmjs.org/package/@vates/diff)

> Computes differences between two arrays, buffers or strings

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/diff):

```sh
npm install --save @vates/diff
```

## Usage

```js
import diff from '@vates/diff'

diff('foo bar baz', 'Foo qux')
// → [ 0, 'F', 4, 'qux', 7, '' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `F`
// - at position 4, it contains `qux`
// - at position 7, it ends

diff('Foo qux', 'foo bar baz')
// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `f`
// - at position 4, it contains `bar`
// - at position 7, it contains `baz`

// works with all collections that support
// - `.length`
// - `collection[index]`
// - `.slice(start, end)`
//
// which includes:
// - arrays
// - strings
// - `Buffer`
// - `TypedArray`
diff([0, 1, 2], [3, 4])
// → [ 0, [ 3, 4 ], 2, [] ]
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,37 +0,0 @@
'use strict'

/**
 * Compare two data arrays, buffers or strings and invoke the provided callback function for each difference.
 *
 * @template {Array|Buffer|string} T
 * @param {Array|Buffer|string} data1 - The first data array or buffer to compare.
 * @param {T} data2 - The second data array or buffer to compare.
 * @param {(index: number, diff: T) => void} [cb] - The callback function to invoke for each difference. If not provided, an array of differences will be returned.
 * @returns {Array<number|T>|undefined} - An array of differences if no callback is provided, otherwise undefined.
 */
module.exports = function diff(data1, data2, cb) {
  let result
  if (cb === undefined) {
    result = []
    cb = result.push.bind(result)
  }

  const n1 = data1.length
  const n2 = data2.length
  const n = Math.min(n1, n2)
  for (let i = 0; i < n; ++i) {
    if (data1[i] !== data2[i]) {
      let j = i + 1
      while (j < n && data1[j] !== data2[j]) {
        ++j
      }
      cb(i, data2.slice(i, j))
      i = j
    }
  }
  if (n1 !== n2) {
    cb(n, n1 < n2 ? data2.slice(n) : data2.slice(0, 0))
  }

  return result
}
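A note on the implementation above: the comparison is strictly positional (`data1[i] !== data2[i]`), not an edit-distance diff, so a one-element shift is reported as one large difference covering the whole shifted region. A quick illustration, with outputs derived by hand from the code above:

```js
const diff = require('./index.js')

// positional comparison: 'bc' shifted by one shares no index with 'abc'
diff('abc', 'bc') // → [ 0, 'bc', 2, '' ]

// identical runs cost nothing: only the differing run is reported
diff('abcd', 'abXd') // → [ 2, 'X' ]
```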
@@ -1,51 +0,0 @@
'use strict'

const assert = require('node:assert/strict')
const test = require('test')

const diff = require('./index.js')

test('data of equal length', function () {
  const data1 = 'foo bar baz'
  const data2 = 'baz bar foo'
  assert.deepEqual(diff(data1, data2), [0, 'baz', 8, 'foo'])
})

test('data1 is longer', function () {
  const data1 = 'foo bar'
  const data2 = 'foo'
  assert.deepEqual(diff(data1, data2), [3, ''])
})

test('data2 is longer', function () {
  const data1 = 'foo'
  const data2 = 'foo bar'
  assert.deepEqual(diff(data1, data2), [3, ' bar'])
})

test('with arrays', function () {
  const data1 = 'foo bar baz'.split('')
  const data2 = 'baz bar foo'.split('')
  assert.deepEqual(diff(data1, data2), [0, 'baz'.split(''), 8, 'foo'.split('')])
})

test('with buffers', function () {
  const data1 = Buffer.from('foo bar baz')
  const data2 = Buffer.from('baz bar foo')
  assert.deepEqual(diff(data1, data2), [0, Buffer.from('baz'), 8, Buffer.from('foo')])
})

test('cb param', function () {
  const data1 = 'foo bar baz'
  const data2 = 'baz bar foo'

  const calls = []
  const cb = (...args) => calls.push(args)

  diff(data1, data2, cb)

  assert.deepEqual(calls, [
    [0, 'baz'],
    [8, 'foo'],
  ])
})
@@ -1,36 +0,0 @@
{
  "private": false,
  "name": "@vates/diff",
  "description": "Computes differences between two arrays, buffers or strings",
  "keywords": [
    "array",
    "binary",
    "buffer",
    "diff",
    "differences",
    "string"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/diff",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/diff",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "devDependencies": {
    "test": "^3.3.0"
  }
}
@@ -1,7 +1,9 @@
import LRU from 'lru-cache'
import Fuse from 'fuse-native'
import { VhdSynthetic } from 'vhd-lib'
import { Disposable, fromCallback } from 'promise-toolbox'
'use strict'

const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')

// build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
@@ -14,7 +16,7 @@ const stat = st => ({
  gid: st.gid !== undefined ? st.gid : process.getgid(),
})

export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
  const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)

  const cache = new LRU({
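Since `mount` is wrapped in `Disposable.factory`, callers are expected to consume it through promise-toolbox's resource management rather than call it directly. A plausible consumption sketch — `handler`, `diskPath` and `mountDir` are placeholders, and this assumes promise-toolbox's `Disposable.use` semantics:

```js
const { Disposable } = require('promise-toolbox')
const { mount } = require('@vates/fuse-vhd')

// `fn` runs while the VHD chain is mounted at `mountDir`; the FUSE mount is
// disposed of automatically when the block returns or throws.
async function withMountedVhd(handler, diskPath, mountDir, fn) {
  return Disposable.use(mount(handler, diskPath, mountDir), () => fn(mountDir))
}
```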
@@ -1,6 +1,6 @@
{
  "name": "@vates/fuse-vhd",
  "version": "2.0.0",
  "version": "1.0.0",
  "license": "ISC",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
@@ -15,14 +15,13 @@
    "url": "https://vates.fr"
  },
  "engines": {
    "node": ">=14"
    "node": ">=10.0"
  },
  "main": "./index.mjs",
  "dependencies": {
    "fuse-native": "^2.2.6",
    "lru-cache": "^7.14.0",
    "promise-toolbox": "^0.21.0",
    "vhd-lib": "^4.5.0"
    "vhd-lib": "^4.2.1"
  },
  "scripts": {
    "postversion": "npm publish --access public"
@vates/nbd-client/constants.js (new file, 42 lines)
@@ -0,0 +1,42 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7

exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5

exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4

exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8

exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1

exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
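These constants mirror the fixed-newstyle NBD protocol. To see how they fit together, here is a hedged sketch of the 28-byte request header the client below builds for each command; the offsets follow the NBD spec (4-byte magic, 2-byte command flags, 2-byte type, 8-byte handle, 8-byte offset, 4-byte length), and `buildReadRequest` is illustrative only, not part of the package:

```js
const { NBD_REQUEST_MAGIC, NBD_CMD_READ } = require('./constants.js')

// Illustrative only: builds the fixed 28-byte NBD request header for a read.
// `handle` is the query id used to match the server's (possibly out-of-order)
// reply back to the request.
function buildReadRequest(handle, offset, length) {
  const buffer = Buffer.alloc(28)
  buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // request magic
  buffer.writeInt16BE(0, 4) // no command flags
  buffer.writeInt16BE(NBD_CMD_READ, 6) // command type
  buffer.writeBigUInt64BE(handle, 8) // handle echoed back by the server
  buffer.writeBigUInt64BE(offset, 16) // offset into the export
  buffer.writeInt32BE(length, 24) // number of bytes to read
  return buffer
}

buildReadRequest(0n, 0n, 64 * 1024)
```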
@@ -1,41 +0,0 @@
export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
export const NBD_OPT_EXPORT_NAME = 1
export const NBD_OPT_ABORT = 2
export const NBD_OPT_LIST = 3
export const NBD_OPT_STARTTLS = 5
export const NBD_OPT_INFO = 6
export const NBD_OPT_GO = 7

export const NBD_FLAG_HAS_FLAGS = 1 << 0
export const NBD_FLAG_READ_ONLY = 1 << 1
export const NBD_FLAG_SEND_FLUSH = 1 << 2
export const NBD_FLAG_SEND_FUA = 1 << 3
export const NBD_FLAG_ROTATIONAL = 1 << 4
export const NBD_FLAG_SEND_TRIM = 1 << 5

export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

export const NBD_CMD_FLAG_FUA = 1 << 0
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
export const NBD_CMD_FLAG_DF = 1 << 2
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4

export const NBD_CMD_READ = 0
export const NBD_CMD_WRITE = 1
export const NBD_CMD_DISC = 2
export const NBD_CMD_FLUSH = 3
export const NBD_CMD_TRIM = 4
export const NBD_CMD_CACHE = 5
export const NBD_CMD_WRITE_ZEROES = 6
export const NBD_CMD_BLOCK_STATUS = 7
export const NBD_CMD_RESIZE = 8

export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
export const NBD_REPLY_ACK = 1

export const NBD_DEFAULT_PORT = 10809
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
@@ -1,11 +1,8 @@
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import { fromCallback, pRetry, pDelay, pTimeout } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import { createLogger } from '@xen-orchestra/log'

import {
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
  INIT_PASSWD,
  NBD_CMD_READ,
  NBD_DEFAULT_BLOCK_SIZE,
@@ -20,13 +17,13 @@ import {
  NBD_REQUEST_MAGIC,
  OPTS_MAGIC,
  NBD_CMD_DISC,
} from './constants.mjs'

const { warn } = createLogger('vates:nbd-client')
} = require('./constants.js')
const { fromCallback } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')

// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

export default class NbdClient {
module.exports = class NbdClient {
  #serverAddress
  #serverCert
  #serverPort
@@ -35,34 +32,18 @@ export default class NbdClient {
  #exportName
  #exportSize

  #waitBeforeReconnect
  #readAhead
  #readBlockRetries
  #reconnectRetry
  #connectTimeout

  // AFAIK, there is no guarantee the server answers in the same order as the queries
  // so we handle a backlog of commands waiting for a response and handle concurrency manually

  #waitingForResponse // there is already a listener waiting for a response
  #nextCommandQueryId = BigInt(0)
  #commandQueryBacklog // map of commands waiting for a response queryId => { size /* in bytes */, resolve, reject }
  #connected = false

  #reconnectingPromise
  constructor(
    { address, port = NBD_DEFAULT_PORT, exportname, cert },
    { connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
  ) {
  constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
    this.#serverAddress = address
    this.#serverPort = port
    this.#exportName = exportname
    this.#serverCert = cert
    this.#waitBeforeReconnect = waitBeforeReconnect
    this.#readAhead = readAhead
    this.#readBlockRetries = readBlockRetries
    this.#reconnectRetry = reconnectRetry
    this.#connectTimeout = connectTimeout
  }

  get exportSize() {
@@ -97,55 +78,24 @@ export default class NbdClient {
    })
  }

  async #connect() {
    // first we connect to the server without tls, and then we upgrade the connection
  async connect() {
    // first we connect to the server without tls, and then we upgrade the connection
    // to tls during the handshake
    await this.#unsecureConnect()
    await this.#handshake()
    this.#connected = true

    // reset internal state if we reconnected a nbd client
    this.#commandQueryBacklog = new Map()
    this.#waitingForResponse = false
  }
  async connect() {
    return pTimeout.call(this.#connect(), this.#connectTimeout)
  }

  async disconnect() {
    if (!this.#connected) {
      return
    }

    const buffer = Buffer.alloc(28)
    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
    buffer.writeInt16BE(0, 4) // no command flags for a disconnect
    buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
    await this.#write(buffer)
    await this.#serverSocket.destroy()
    this.#serverSocket = undefined
    this.#connected = false
  }

  #clearReconnectPromise = () => {
    this.#reconnectingPromise = undefined
  }

  async #reconnect() {
    await this.disconnect().catch(() => {})
    await pDelay(this.#waitBeforeReconnect) // need to let the xapi clean things on its side
    await this.connect()
  }

  async reconnect() {
    // we need to ensure reconnections do not occur in parallel
    if (this.#reconnectingPromise === undefined) {
      this.#reconnectingPromise = pRetry(() => this.#reconnect(), {
        tries: this.#reconnectRetry,
      })
      this.#reconnectingPromise.then(this.#clearReconnectPromise, this.#clearReconnectPromise)
    }

    return this.#reconnectingPromise
  }

  // we can use individual read/write from the socket here since there is no concurrency
@@ -223,6 +173,7 @@ export default class NbdClient {
    this.#commandQueryBacklog.forEach(({ reject }) => {
      reject(error)
    })
    await this.disconnect()
  }

  async #readBlockResponse() {
@@ -230,6 +181,7 @@ export default class NbdClient {
    if (this.#waitingForResponse) {
      return
    }

    try {
      this.#waitingForResponse = true
      const magic = await this.#readInt32()
@@ -254,8 +206,7 @@ export default class NbdClient {
      query.resolve(data)
      this.#waitingForResponse = false
      if (this.#commandQueryBacklog.size > 0) {
        // it doesn't throw directly but will reject all relevant promises on failure
        this.#readBlockResponse()
        await this.#readBlockResponse()
      }
    } catch (error) {
      // reject all the promises
@@ -266,11 +217,6 @@ export default class NbdClient {
  }

  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    // we don't want to add anything in backlog while reconnecting
    if (this.#reconnectingPromise) {
      await this.#reconnectingPromise
    }

    const queryId = this.#nextCommandQueryId
    this.#nextCommandQueryId++

@@ -285,67 +231,19 @@ export default class NbdClient {
    buffer.writeInt32BE(size, 24)

    return new Promise((resolve, reject) => {
      function decoratedReject(error) {
        error.index = index
        error.size = size
        reject(error)
      }

      // this will handle one block response, but it can be another block
      // since the server does not guarantee to handle queries in order
      this.#commandQueryBacklog.set(queryId, {
        size,
        resolve,
        reject: decoratedReject,
        reject,
      })
      // really send the command to the server
      this.#write(buffer).catch(decoratedReject)
      this.#write(buffer).catch(reject)

      // #readBlockResponse never throws directly
      // but if it fails it will reject all the promises in the backlog
      this.#readBlockResponse()
    })
  }

  async *readBlocks(indexGenerator) {
    // default : read all blocks
    if (indexGenerator === undefined) {
      const exportSize = this.#exportSize
      const chunkSize = 2 * 1024 * 1024
      indexGenerator = function* () {
        const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
        for (let index = 0; BigInt(index) < nbBlocks; index++) {
          yield { index, size: chunkSize }
        }
      }
    }
    const readAhead = []
    const readAheadMaxLength = this.#readAhead
    const makeReadBlockPromise = (index, size) => {
      const promise = pRetry(() => this.readBlock(index, size), {
        tries: this.#readBlockRetries,
        onRetry: async err => {
          warn('will retry reading block ', index, err)
          await this.reconnect()
        },
      })
      // error is handled during unshift
      promise.catch(() => {})
      return promise
    }

    // read all blocks, but try to keep readAheadMaxLength promises waiting ahead
    for (const { index, size } of indexGenerator()) {
      // stack readAheadMaxLength promises before starting to handle the results
      if (readAhead.length === readAheadMaxLength) {
        // any error will stop reading blocks
        yield readAhead.shift()
      }

      readAhead.push(makeReadBlockPromise(index, size))
    }
    while (readAhead.length > 0) {
      yield readAhead.shift()
    }
  }
}
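Putting the pieces together, a minimal consumption sketch of the client above — the connection options are placeholders, and `readBlocks()` with no argument drives the default 2 MiB read-ahead iterator shown in the diff:

```js
const NbdClient = require('@vates/nbd-client')

async function dumpExport() {
  // placeholder connection options; pass `cert` to enable the TLS upgrade
  const client = new NbdClient({ address: '127.0.0.1', exportname: 'MY_EXPORT' })
  await client.connect()
  try {
    // iterate over the whole export; retries and reconnections are handled internally
    for await (const block of client.readBlocks()) {
      console.log('got a block of', block.length, 'bytes')
    }
  } finally {
    await client.disconnect()
  }
}
```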
@vates/nbd-client/nbdclient.spec.js (new file, 76 lines)
@@ -0,0 +1,76 @@
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')

const FILE_SIZE = 2 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )

  const client = new NbdClient({
    address: 'localhost',
    exportname: 'MY_SECRET_EXPORT',
    secure: false,
  })

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 128 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  // read multiple blocks in parallel
  await asyncEach(
    indexes,
    async i => {
      const block = await client.readBlock(i, CHUNK_SIZE)
      let blockOk = true
      let firstFail
      for (let j = 0; j < CHUNK_SIZE; j += 4) {
        const wanted = i * CHUNK_SIZE + j
        const found = block.readUInt32BE(j)
        blockOk = blockOk && found === wanted
        if (!blockOk && firstFail === undefined) {
          firstFail = j
        }
      }
      tap.ok(blockOk, `check block ${i} content`)
    },
    { concurrency: 8 }
  )
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@@ -13,18 +13,16 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "2.0.0",
  "version": "1.0.1",
  "engines": {
    "node": ">=14.0"
  },
  "main": "./index.mjs",
  "dependencies": {
    "@vates/async-each": "^1.0.0",
    "@vates/read-chunk": "^1.2.0",
    "@vates/read-chunk": "^1.0.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.6.0",
    "promise-toolbox": "^0.21.0",
    "xen-api": "^1.3.4"
    "xen-api": "^1.2.7"
  },
  "devDependencies": {
    "tap": "^16.3.0",
@@ -32,6 +30,6 @@
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
    "test-integration": "tap *.spec.js"
  }
}
@@ -1,182 +0,0 @@
Public Key Info:
	Public Key Algorithm: RSA
	Key Security Level: High (3072 bits)

modulus:
00:be:92:be:df:de:0a:ab:38:fc:1a:c0:1a:58:4d:86
b8:1f:25:10:7d:19:05:17:bf:02:3d:e9:ef:f8:c0:04
5d:6f:98:de:5c:dd:c3:0f:e2:61:61:e4:b5:9c:42:ac
3e:af:fd:30:10:e1:54:32:66:75:f6:80:90:85:05:a0
6a:14:a2:6f:a7:2e:f0:f3:52:94:2a:f2:34:fc:0d:b4
fb:28:5d:1c:11:5c:59:6e:63:34:ba:b3:fd:73:b1:48
35:00:84:53:da:6a:9b:84:ab:64:b1:a1:2b:3a:d1:5a
d7:13:7c:12:2a:4e:72:e9:96:d6:30:74:c5:71:05:14
4b:2d:01:94:23:67:4e:37:3c:1e:c1:a0:bc:34:04:25
21:11:fb:4b:6b:53:74:8f:90:93:57:af:7f:3b:78:d6
a4:87:fe:7d:ed:20:11:8b:70:54:67:b8:c9:f5:c0:6b
de:4e:e7:a5:79:ff:f7:ad:cf:10:57:f5:51:70:7b:54
68:28:9e:b9:c2:10:7b:ab:aa:11:47:9f:ec:e6:2f:09
44:4a:88:5b:dd:8c:10:b4:c4:03:25:06:d9:e0:9f:a0
0d:cf:94:4b:3b:fa:a5:17:2c:e4:67:c4:17:6a:ab:d8
c8:7a:16:41:b9:91:b7:9c:ae:8c:94:be:26:61:51:71
c1:a6:39:39:97:75:28:a9:0e:21:ea:f0:bd:71:4a:8c
e1:f8:1d:a9:22:2f:10:a8:1b:e5:a4:9a:fd:0f:fa:c6
20:bc:96:99:79:c6:ba:a4:1f:3e:d4:91:c5:af:bb:71
0a:5a:ef:69:9c:64:69:ce:5a:fe:3f:c2:24:f4:26:d4
3d:ab:ab:9a:f0:f6:f1:b1:64:a9:f4:e2:34:6a:ab:2e
95:47:b9:07:5a:39:c6:95:9c:a9:e8:ed:71:dd:c1:21
16:c8:2d:4c:2c:af:06:9d:c6:fa:fe:c5:2a:6c:b4:c3
d5:96:fc:5e:fd:ec:1c:30:b4:9d:cb:29:ef:a8:50:1c
21:

public exponent:
01:00:01:

private exponent:
25:37:c5:7d:35:01:02:65:73:9e:c9:cb:9b:59:30:a9
3e:b3:df:5f:7f:06:66:97:d0:19:45:59:af:4b:d8:ce
62:a0:09:35:3b:bd:ff:99:27:89:95:bf:fe:0f:6b:52
26:ce:9c:97:7f:5a:11:29:bf:79:ef:ab:c9:be:ca:90
4d:0d:58:1e:df:65:01:30:2c:6d:a2:b5:c4:4f:ec:fb
6b:eb:9b:32:ac:c5:6e:70:83:78:be:f4:0d:a7:1e:c1
f3:22:e4:b9:70:3e:85:0f:6f:ef:dc:d8:f3:78:b5:73
f1:83:36:8c:fa:9b:28:91:63:ad:3c:f0:de:5c:ae:94
eb:ea:36:03:20:06:bf:74:c7:50:eb:52:36:1a:65:21
eb:40:17:7f:93:61:dd:33:d0:02:bc:ec:6d:31:f1:41
5a:a9:d1:f0:00:66:4c:c4:18:47:d5:67:e3:cd:bb:83
44:07:ab:62:83:21:dc:d8:e6:89:37:08:bb:9d:ea:62
c2:5d:ce:85:c2:dc:48:27:0c:a4:23:61:b7:30:e7:26
44:dc:1e:5c:2e:16:35:2b:2e:a6:e6:a4:ce:1f:9b:e9
fe:96:fa:49:1d:fb:2a:df:bc:bf:46:da:52:f8:37:8a
84:ab:e4:73:e6:46:56:b5:b4:3d:e1:63:eb:02:8e:d7
67:96:c4:dc:28:6d:6b:b6:0c:a3:0b:db:87:29:ad:f9
ec:73:b6:55:a3:40:32:13:84:c7:2f:33:74:04:dc:42
00:11:9c:fb:fc:62:35:b3:82:c3:3c:28:80:e8:09:a8
97:c7:c1:2e:3d:27:fa:4f:9b:fc:c2:34:58:41:5c:a1
e2:70:2e:2f:82:ad:bd:bd:8e:dd:23:12:25:de:89:70
60:75:48:90:80:ac:55:74:51:6f:49:9e:7f:63:41:8b
3c:b1:f5:c3:6b:4b:5a:50:a6:4d:38:e8:82:c2:04:c8
30:fd:06:9b:c1:04:27:b6:63:3a:5e:f5:4d:00:c3:d1

prime1:
00:f6:00:2e:7d:89:61:24:16:5e:87:ca:18:6c:03:b8
b4:33:df:4a:a7:7f:db:ed:39:15:41:12:61:4f:4e:b4
de:ab:29:d9:0c:6c:01:7e:53:2e:ee:e7:5f:a2:e4:6d
c6:4b:07:4e:d8:a3:ae:45:06:97:bd:18:a3:e9:dd:29
54:64:6d:f0:af:08:95:ae:ae:3e:71:63:76:2a:a1:18
c4:b1:fc:bc:3d:42:15:74:b3:c5:38:1f:5d:92:f1:b2
c6:3f:10:fe:35:1a:c6:b1:ce:70:38:ff:08:5c:de:61
79:c7:50:91:22:4d:e9:c8:18:49:e2:5c:91:84:86:e2
4d:0f:6e:9b:0d:81:df:aa:f3:59:75:56:e9:33:18:dd
ab:39:da:e2:25:01:05:a1:6e:23:59:15:2c:89:35:c7
ae:9c:c7:ea:88:9a:1a:f3:48:07:11:82:59:79:8c:62
53:06:37:30:14:b3:82:b1:50:fc:ae:b8:f7:1c:57:44
7d:

prime2:
00:c6:51:cc:dc:88:2e:cf:98:90:10:19:e0:d3:a4:d1
3f:dc:b0:29:d3:bb:26:ee:eb:00:17:17:d1:d1:bb:9b
34:b1:4e:af:b5:6c:1c:54:53:b4:bb:55:da:f7:78:cd
38:b4:2e:3a:8c:63:80:3b:64:9c:b4:2b:cd:dd:50:0b
05:d2:00:7a:df:8e:c3:e6:29:e0:9c:d8:40:b7:11:09
f4:38:df:f6:ed:93:1e:18:d4:93:fa:8d:ee:82:9c:0f
c1:88:26:84:9d:4f:ae:8a:17:d5:55:54:4c:c6:0a:ac
4d:ec:33:51:68:0f:4b:92:2e:04:57:fe:15:f5:00:46
5c:8e:ad:09:2c:e7:df:d5:36:7a:4e:bd:da:21:22:d7
58:b4:72:93:94:af:34:cc:e2:b8:d0:4f:0b:5d:97:08
12:19:17:34:c5:15:49:00:48:56:13:b8:45:4e:3b:f8
bc:d5:ab:d9:6d:c2:4a:cc:01:1a:53:4d:46:50:49:3b
75:

coefficient:
63:67:50:29:10:6a:85:a3:dc:51:90:20:76:86:8c:83
8e:d5:ff:aa:75:fd:b5:f8:31:b0:96:6c:18:1d:5b:ed
a4:2e:47:8d:9c:c2:1e:2c:a8:6d:4b:10:a5:c2:53:46
8a:9a:84:91:d7:fc:f5:cc:03:ce:b9:3d:5c:01:d2:27
99:7b:79:89:4f:a1:12:e3:05:5d:ee:10:f6:8c:e6:ce
5e:da:32:56:6d:6f:eb:32:b4:75:7b:94:49:d8:2d:9e
4d:19:59:2e:e4:0b:bc:95:df:df:65:67:a1:dd:c6:2b
99:f4:76:e8:9f:fa:57:1d:ca:f9:58:a9:ce:9b:30:5c
42:8a:ba:05:e7:e2:15:45:25:bc:e9:68:c1:8b:1a:37
cc:e1:aa:45:2e:94:f5:81:47:1e:64:7f:c0:c1:b7:a8
21:58:18:a9:a0:ed:e0:27:75:bf:65:81:6b:e4:1d:5a
b7:7e:df:d8:28:c6:36:21:19:c8:6e:da:ca:9e:da:84

exp1:
00:ba:d7:fe:77:a9:0d:98:2c:49:56:57:c0:5e:e2:20
ba:f6:1f:26:03:bc:d0:5d:08:9b:45:16:61:c4:ab:e2
22:b1:dc:92:17:a6:3d:28:26:a4:22:1e:a8:7b:ff:86
05:33:5d:74:9c:85:0d:cb:2d:ab:b8:9b:6b:7c:28:57
c8:da:92:ca:59:17:6b:21:07:05:34:78:37:fb:3e:ea
a2:13:12:04:23:7e:fa:ee:ed:cf:e0:c5:a9:fb:ff:0a
2b:1b:21:9c:02:d7:b8:8c:ba:60:70:59:fc:8f:14:f4
f2:5a:d9:ad:b2:61:7d:2c:56:8e:5f:98:b1:89:f8:2d
10:1c:a5:84:ad:28:b4:aa:92:34:a3:34:04:e1:a3:84
52:16:1a:52:e3:8a:38:2d:99:8a:cd:91:90:87:12:ca
fc:ab:e6:08:14:03:00:6f:41:88:e4:da:9d:7c:fd:8c
7c:c4:de:cb:ed:1d:3f:29:d0:7a:6b:76:df:71:ae:32
bd:

exp2:
4a:e9:d3:6c:ea:b4:64:0e:c9:3c:8b:c9:f5:a8:a8:b2
6a:f6:d0:95:fe:78:32:7f:ea:c4:ce:66:9f:c7:32:55
b1:34:7c:03:18:17:8b:73:23:2e:30:bc:4a:07:03:de
8b:91:7a:e4:55:21:b7:4d:c6:33:f8:e8:06:d5:99:94
55:43:81:26:b9:93:1e:7a:6b:32:54:2d:fd:f9:1d:bd
77:4e:82:c4:33:72:87:06:a5:ef:5b:75:e1:38:7a:6b
2c:b7:00:19:3c:64:3e:1d:ca:a4:34:f7:db:47:64:d6
fa:86:58:15:ea:d1:2d:22:dc:d9:30:4d:b3:02:ab:91
83:03:b2:17:98:6f:60:e6:f7:44:8f:4a:ba:81:a2:bf
0b:4a:cc:9c:b9:a2:44:52:d0:65:3f:b6:97:5f:d9:d8
9c:49:bb:d1:46:bd:10:b2:42:71:a8:85:e5:8b:99:e6
1b:00:93:5d:76:ab:32:6c:a8:39:17:53:9c:38:4d:91

Public Key PIN:
pin-sha256:ISh/UeFjUG5Gwrpx6hMUGQPvg9wOKjOkHmRbs4YjZqs=
Public Key ID:
sha256:21287f51e163506e46c2ba71ea13141903ef83dc0e2a33a41e645bb3862366ab
sha1:1a48455111ac45fb5807c5cdb7b20b896c52f0b6

-----BEGIN RSA PRIVATE KEY-----
MIIG4wIBAAKCAYEAvpK+394Kqzj8GsAaWE2GuB8lEH0ZBRe/Aj3p7/jABF1vmN5c
3cMP4mFh5LWcQqw+r/0wEOFUMmZ19oCQhQWgahSib6cu8PNSlCryNPwNtPsoXRwR
XFluYzS6s/1zsUg1AIRT2mqbhKtksaErOtFa1xN8EipOcumW1jB0xXEFFEstAZQj
Z043PB7BoLw0BCUhEftLa1N0j5CTV69/O3jWpIf+fe0gEYtwVGe4yfXAa95O56V5
//etzxBX9VFwe1RoKJ65whB7q6oRR5/s5i8JREqIW92MELTEAyUG2eCfoA3PlEs7
+qUXLORnxBdqq9jIehZBuZG3nK6MlL4mYVFxwaY5OZd1KKkOIerwvXFKjOH4Haki
LxCoG+Wkmv0P+sYgvJaZeca6pB8+1JHFr7txClrvaZxkac5a/j/CJPQm1D2rq5rw
9vGxZKn04jRqqy6VR7kHWjnGlZyp6O1x3cEhFsgtTCyvBp3G+v7FKmy0w9WW/F79
7BwwtJ3LKe+oUBwhAgMBAAECggGAJTfFfTUBAmVznsnLm1kwqT6z319/BmaX0BlF
Wa9L2M5ioAk1O73/mSeJlb/+D2tSJs6cl39aESm/ee+ryb7KkE0NWB7fZQEwLG2i
tcRP7Ptr65syrMVucIN4vvQNpx7B8yLkuXA+hQ9v79zY83i1c/GDNoz6myiRY608
8N5crpTr6jYDIAa/dMdQ61I2GmUh60AXf5Nh3TPQArzsbTHxQVqp0fAAZkzEGEfV
Z+PNu4NEB6tigyHc2OaJNwi7nepiwl3OhcLcSCcMpCNhtzDnJkTcHlwuFjUrLqbm
pM4fm+n+lvpJHfsq37y/RtpS+DeKhKvkc+ZGVrW0PeFj6wKO12eWxNwobWu2DKML
24cprfnsc7ZVo0AyE4THLzN0BNxCABGc+/xiNbOCwzwogOgJqJfHwS49J/pPm/zC
NFhBXKHicC4vgq29vY7dIxIl3olwYHVIkICsVXRRb0mef2NBizyx9cNrS1pQpk04
6ILCBMgw/QabwQQntmM6XvVNAMPRAoHBAPYALn2JYSQWXofKGGwDuLQz30qnf9vt
ORVBEmFPTrTeqynZDGwBflMu7udfouRtxksHTtijrkUGl70Yo+ndKVRkbfCvCJWu
rj5xY3YqoRjEsfy8PUIVdLPFOB9dkvGyxj8Q/jUaxrHOcDj/CFzeYXnHUJEiTenI
GEniXJGEhuJND26bDYHfqvNZdVbpMxjdqzna4iUBBaFuI1kVLIk1x66cx+qImhrz
SAcRgll5jGJTBjcwFLOCsVD8rrj3HFdEfQKBwQDGUczciC7PmJAQGeDTpNE/3LAp
07sm7usAFxfR0bubNLFOr7VsHFRTtLtV2vd4zTi0LjqMY4A7ZJy0K83dUAsF0gB6
347D5ingnNhAtxEJ9Djf9u2THhjUk/qN7oKcD8GIJoSdT66KF9VVVEzGCqxN7DNR
aA9Lki4EV/4V9QBGXI6tCSzn39U2ek692iEi11i0cpOUrzTM4rjQTwtdlwgSGRc0
xRVJAEhWE7hFTjv4vNWr2W3CSswBGlNNRlBJO3UCgcEAutf+d6kNmCxJVlfAXuIg
uvYfJgO80F0Im0UWYcSr4iKx3JIXpj0oJqQiHqh7/4YFM110nIUNyy2ruJtrfChX
yNqSylkXayEHBTR4N/s+6qITEgQjfvru7c/gxan7/worGyGcAte4jLpgcFn8jxT0
8lrZrbJhfSxWjl+YsYn4LRAcpYStKLSqkjSjNATho4RSFhpS44o4LZmKzZGQhxLK
/KvmCBQDAG9BiOTanXz9jHzE3svtHT8p0Hprdt9xrjK9AoHASunTbOq0ZA7JPIvJ
9aiosmr20JX+eDJ/6sTOZp/HMlWxNHwDGBeLcyMuMLxKBwPei5F65FUht03GM/jo
BtWZlFVDgSa5kx56azJULf35Hb13ToLEM3KHBqXvW3XhOHprLLcAGTxkPh3KpDT3
20dk1vqGWBXq0S0i3NkwTbMCq5GDA7IXmG9g5vdEj0q6gaK/C0rMnLmiRFLQZT+2
l1/Z2JxJu9FGvRCyQnGoheWLmeYbAJNddqsybKg5F1OcOE2RAoHAY2dQKRBqhaPc
UZAgdoaMg47V/6p1/bX4MbCWbBgdW+2kLkeNnMIeLKhtSxClwlNGipqEkdf89cwD
zrk9XAHSJ5l7eYlPoRLjBV3uEPaM5s5e2jJWbW/rMrR1e5RJ2C2eTRlZLuQLvJXf
32Vnod3GK5n0duif+lcdyvlYqc6bMFxCiroF5+IVRSW86WjBixo3zOGqRS6U9YFH
HmR/wMG3qCFYGKmg7eAndb9lgWvkHVq3ft/YKMY2IRnIbtrKntqE
-----END RSA PRIVATE KEY-----
@@ -1,168 +0,0 @@
import NbdClient from '../index.mjs'
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'

const FILE_SIZE = 10 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

async function spawnNbdKit(path) {
  let tries = 5
  // wait for server to be ready

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
      '--tls=on',
      '--tls-certificates=./tests/',
      // '--tls-verify-peer',
      // '--verbose',
      '--exit-with-parent',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  nbdServer.on('error', err => {
    console.error(err)
  })
  do {
    try {
      const socket = new Socket()
      await new Promise((resolve, reject) => {
        socket.connect(NBD_DEFAULT_PORT, 'localhost')
        socket.once('error', reject)
        socket.once('connect', resolve)
      })
      socket.destroy()
      break
    } catch (err) {
      tries--
      if (tries <= 0) {
        throw err
      } else {
        await new Promise(resolve => setTimeout(resolve, 1000))
      }
    }
  } while (true)
  return nbdServer
}

async function killNbdKit() {
  return new Promise((resolve, reject) =>
    exec('pkill -9 -f -o nbdkit', err => {
      err ? reject(err) : resolve()
    })
  )
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  let nbdServer = await spawnNbdKit(path)
  const client = new NbdClient(
    {
      address: '127.0.0.1',
      exportname: 'MY_SECRET_EXPORT',
      cert: `-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
`,
    },
    {
      readAhead: 2,
    }
  )

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 1024 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  const nbdIterator = client.readBlocks(function* () {
    for (const index of indexes) {
      yield { index, size: CHUNK_SIZE }
    }
  })
  let i = 0
  for await (const block of nbdIterator) {
    let blockOk = true
    let firstFail
    for (let j = 0; j < CHUNK_SIZE; j += 4) {
      const wanted = i * CHUNK_SIZE + j
      const found = block.readUInt32BE(j)
      blockOk = blockOk && found === wanted
      if (!blockOk && firstFail === undefined) {
        firstFail = j
      }
    }
    tap.ok(blockOk, `check block ${i} content`)
    i++

    // flaky server is flaky
    if (i % 7 === 0) {
      // kill the older nbdkit process
      await killNbdKit()
      nbdServer = await spawnNbdKit(path)
    }
  }

  // we can reuse the connection to read other blocks
  // default iterator
  const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
  let nb = 0
  for await (const block of nbdIteratorWithDefaultBlockIterator) {
    nb++
    tap.equal(block.length, 2 * 1024 * 1024)
  }

  tap.equal(nb, 5)
  assert.rejects(() => client.readBlock(100, CHUNK_SIZE))

  await client.disconnect()
  // double disconnection shouldn't pose any problem
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/8wLopj/iZY6i
jmpvgCJsl+zY0hQZQcIoaCs0H75u8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZol
evaSJLNT2Iolscvc2W9NCF4N1V6yzs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh
67u+uI40732AfQqD01BNCTD/uHRBlKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y
2SJVTeT4a1sSJixl6I1YPmt80FJhgq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULw
dJOGgmqGRDzgZKJS5UUpxe/ViEO459I18vIkgibaRYhENgmnP3lIzTOLlUe07tbS
ML5RGBbBAgMBAAECggEATLYiafcTHfgnZmjTOad0WoDnC4n9tVBV948WARlUooLS
duL3RQRHCLz9/ZaTuFA1XDpNcYyc/B/IZoU7aJGZR3+JSmJBjowpUphu+klVNNG4
i6lDRrzYlUI0hfdLjHsDTDBIKi91KcB0lix/VkvsrVQvDHwsiR2ZAIiVWAWQFKrR
5O3DhSTHbqyq47uR58rWr4Zf3zvZaUl841AS1yELzCiZqz7AenvyWphim0c0XA5d
I63CEShntHnEAA9OMcP8+BNf/3AmqB4welY+m8elB3aJNH+j7DKq/AWqaM5nl2PC
cS6qgpxwOyTxEOyj1xhwK5ZMRR3heW3NfutIxSOPlwKBgQDB9ZkrBeeGVtCISO7C
eCANzSLpeVrahTvaCSQLdPHsLRLDUc+5mxdpi3CaRlzYs3S1OWdAtyWX9mBryltF
qDPhCNjFDyHok4D3wLEWdS9oUVwEKUM8fOPW3tXLLiMM7p4862Qo7LqnqHzPqsnz
22iZo5yjcc7aLJ+VmFrbAowwOwKBgQD9WNCvczTd7Ymn7zEvdiAyNoS0OZ0orwEJ
zGaxtjqVguGklNfrb/UB+eKNGE80+YnMiSaFc9IQPetLntZdV0L7kWYdCI8kGDNA
DbVRCOp+z8DwAojlrb/zsYu23anQozT3WeHxVU66lNuyEQvSW2tJa8gN1htrD7uY
5KLibYrBMwKBgEM0iiHyJcrSgeb2/mO7o7+keJhVSDm3OInP6QFfQAQJihrLWiKB
rpcPjbCm+LzNUX8JqNEvpIMHB1nR/9Ye9frfSdzd5W3kzicKSVHywL5wkmWOtpFa
5Mcq5wFDtzlf5MxO86GKhRJauwRptRgdyhySKFApuva1x4XaCIEiXNjJAoGBAN82
t3c+HCBEv3o05rMYcrmLC1T3Rh6oQlPtwbVmByvfywsFEVCgrc/16MPD3VWhXuXV
GRmPuE8THxLbead30M5xhvShq+xzXgRbj5s8Lc9ZIHbW5OLoOS1vCtgtaQcoJOyi
Rs4pCVqe+QpktnO6lEZ2Libys+maTQEiwNibBxu9AoGAUG1V5aKMoXa7pmGeuFR6
ES+1NDiCt6yDq9BsLZ+e2uqvWTkvTGLLwvH6xf9a0pnnILd0AUTKAAaoUdZS6++E
cGob7fxMwEE+UETp0QBgLtfjtExMOFwr2avw8PV4CYEUkPUAm2OFB2Twh+d/PNfr
FAxF1rN47SBPNbFI8N4TFsg=
-----END PRIVATE KEY-----
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 reedog117

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,127 +0,0 @@
forked from https://github.com/reedog117/node-vsphere-soap

# node-vsphere-soap

[](https://gitter.im/reedog117/node-vsphere-soap?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

This is a Node.js module to connect to VMware vCenter servers and/or ESXi hosts and perform operations using the [vSphere Web Services API]. If you're feeling really adventurous, you can use this module to port vSphere operations from other languages (such as the Perl, Python, and Go libraries that exist) and have fully native Node.js code controlling your VMware virtual infrastructure!

This is very much in alpha.

## Authors

- Patrick C - [@reedog117]

## Version

0.0.2-5

## Installation

```sh
$ npm install node-vsphere-soap --save
```

## Sample Code

### To connect to a vCenter server:

    var nvs = require('node-vsphere-soap');
    var vc = new nvs.Client(host, user, password, sslVerify);
    vc.once('ready', function() {
      // perform work here
    });
    vc.once('error', function(err) {
      // handle error here
    });

#### Arguments

- host = hostname or IP of vCenter/ESX/ESXi server
- user = username
- password = password
- sslVerify = true|false - set to false if you have self-signed/unverified certificates

#### Events

- ready = emits when session authenticated with server
- error = emits when there's an error
  - _err_ contains the error

#### Client instance variables

- serviceContent - ServiceContent object retrieved by RetrieveServiceContent API call
- userName - username of authenticated user
- fullName - full name of authenticated user

### To run a command:

    var vcCmd = vc.runCommand( commandToRun, arguments );
    vcCmd.once('result', function( result, raw, soapHeader) {
      // handle results
    });
    vcCmd.once('error', function( err) {
      // handle errors
    });

#### Arguments

- commandToRun = Method from the vSphere API
- arguments = JSON document containing arguments to send

#### Events

- result = emits when session authenticated with server
  - _result_ contains the JSON-formatted result from the server
  - _raw_ contains the raw SOAP XML response from the server
  - _soapHeader_ contains any soapHeaders from the server
- error = emits when there's an error
  - _err_ contains the error

Make sure you check out tests/vsphere-soap.test.js for examples on how to create commands to run

## Development

node-vsphere-soap uses a number of open source projects to work properly:

- [node.js] - evented I/O for the backend
- [node-soap] - SOAP client for Node.js
- [soap-cookie] - cookie authentication for the node-soap module
- [lodash] - for quickly manipulating JSON
- [lab] - testing engine
- [code] - assertion engine used with lab

Want to contribute? Great!

### Todo's

- Write More Tests
- Create Travis CI test harness with a fake vCenter Instance
- Add Code Comments

### Testing

I have been testing on a Mac with node v0.10.36 and both ESXi and vCenter 5.5.

To edit tests, edit the file **test/vsphere-soap.test.js**

To point the module at your own vCenter/ESXi host, edit **config-test.stub.js** and save it as **config-test.js**

To run test scripts:

```sh
$ npm test
```

## License

MIT

[vSphere Web Services API]: http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/right-pane.html
[node-soap]: https://github.com/vpulim/node-soap
[node.js]: http://nodejs.org/
[soap-cookie]: https://github.com/shanestillwell/soap-cookie
[code]: https://github.com/hapijs/code
[lab]: https://github.com/hapijs/lab
[lodash]: https://lodash.com/
[@reedog117]: http://www.twitter.com/reedog117
@@ -1,230 +0,0 @@
/*

  node-vsphere-soap

  client.js

  This file creates the Client class

  - when the class is instantiated, a connection will be made to the ESXi/vCenter server to verify that the creds are good
  - upon a bad login, the connection will be terminated

*/

import { EventEmitter } from 'events'
import axios from 'axios'
import https from 'node:https'
import util from 'util'
import soap from 'soap'
import Cookie from 'soap-cookie' // required for session persistence

// Client class
// inherits from EventEmitter
// possible events: connect, error, ready

export function Client(vCenterHostname, username, password, sslVerify) {
  this.status = 'disconnected'
  this.reconnectCount = 0

  sslVerify = typeof sslVerify !== 'undefined' ? sslVerify : false

  EventEmitter.call(this)

  // sslVerify argument handling
  if (sslVerify) {
    this.clientopts = {}
  } else {
    this.clientopts = {
      request: axios.create({
        httpsAgent: new https.Agent({
          rejectUnauthorized: false,
        }),
      }),
    }
  }

  this.connectionInfo = {
    host: vCenterHostname,
    user: username,
    password,
    sslVerify,
  }

  this._loginArgs = {
    userName: this.connectionInfo.user,
    password: this.connectionInfo.password,
  }

  this._vcUrl = 'https://' + this.connectionInfo.host + '/sdk/vimService.wsdl'

  // connect to the vCenter / ESXi host
  this.on('connect', this._connect)
  this.emit('connect')

  // close session
  this.on('close', this._close)

  return this
}

util.inherits(Client, EventEmitter)

Client.prototype.runCommand = function (command, args) {
  const self = this
  let cmdargs
  if (!args || args === null) {
    cmdargs = {}
  } else {
    cmdargs = args
  }

  const emitter = new EventEmitter()

  // check if client has successfully connected
  if (self.status === 'ready' || self.status === 'connecting') {
    self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
      if (err) {
        _soapErrorHandler(self, emitter, command, cmdargs, err)
      }
      if (command === 'Logout') {
        self.status = 'disconnected'
        process.removeAllListeners('beforeExit')
      }
      emitter.emit('result', result, raw, soapHeader)
    })
  } else {
    // if connection not ready or connecting, reconnect to instance
    if (self.status === 'disconnected') {
      self.emit('connect')
    }
    self.once('ready', function () {
      self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
        if (err) {
          _soapErrorHandler(self, emitter, command, cmdargs, err)
        }
        if (command === 'Logout') {
          self.status = 'disconnected'
          process.removeAllListeners('beforeExit')
        }
        emitter.emit('result', result, raw, soapHeader)
      })
    })
  }

  return emitter
}

Client.prototype.close = function () {
  const self = this

  self.emit('close')
}

Client.prototype._connect = function () {
  const self = this

  if (self.status !== 'disconnected') {
    return
  }

  self.status = 'connecting'

  soap.createClient(
    self._vcUrl,
    self.clientopts,
    function (err, client) {
      if (err) {
        self.emit('error', err)
        throw err
      }

      self.client = client // save client for later use

      self
        .runCommand('RetrieveServiceContent', { _this: 'ServiceInstance' })
        .once('result', function (result, raw, soapHeader) {
          if (!result.returnval) {
            self.status = 'disconnected'
            self.emit('error', raw)
            return
          }

          self.serviceContent = result.returnval
          self.sessionManager = result.returnval.sessionManager
          const loginArgs = { _this: self.sessionManager, ...self._loginArgs }

          self
            .runCommand('Login', loginArgs)
            .once('result', function (result, raw, soapHeader) {
              self.authCookie = new Cookie(client.lastResponseHeaders)
              self.client.setSecurity(self.authCookie) // needed since vSphere SOAP WS uses cookies

              self.userName = result.returnval.userName
              self.fullName = result.returnval.fullName
              self.reconnectCount = 0

              self.status = 'ready'
              self.emit('ready')
              process.once('beforeExit', self._close)
            })
            .once('error', function (err) {
              self.status = 'disconnected'
              self.emit('error', err)
            })
        })
        .once('error', function (err) {
          self.status = 'disconnected'
          self.emit('error', err)
        })
    },
    self._vcUrl
  )
}

Client.prototype._close = function () {
  const self = this

  if (self.status === 'ready') {
    self
      .runCommand('Logout', { _this: self.sessionManager })
      .once('result', function () {
        self.status = 'disconnected'
      })
      .once('error', function () {
        /* don't care of error during disconnection */
        self.status = 'disconnected'
      })
  } else {
    self.status = 'disconnected'
  }
}

function _soapErrorHandler(self, emitter, command, args, err) {
  err = err || { body: 'general error' }

  if (err.body.match(/session is not authenticated/)) {
    self.status = 'disconnected'
    process.removeAllListeners('beforeExit')

    if (self.reconnectCount < 10) {
      self.reconnectCount += 1
      self
        .runCommand(command, args)
        .once('result', function (result, raw, soapHeader) {
          emitter.emit('result', result, raw, soapHeader)
        })
        .once('error', function (err) {
          emitter.emit('error', err.body)
          throw err
        })
    } else {
      emitter.emit('error', err.body)
      throw err
    }
  } else {
    emitter.emit('error', err.body)
    throw err
  }
}

// end
@@ -1,38 +0,0 @@
{
  "name": "@vates/node-vsphere-soap",
  "version": "2.0.0",
  "description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
  "main": "lib/client.mjs",
  "author": "reedog117",
  "repository": {
    "directory": "@vates/node-vsphere-soap",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "axios": "^1.4.0",
    "soap": "^1.0.0",
    "soap-cookie": "^0.10.1"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "keywords": [
    "vsphere",
    "vcenter",
    "api",
    "soap",
    "wsdl"
  ],
  "preferGlobal": false,
  "license": "MIT",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
  "engines": {
    "node": ">=14"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
@@ -1,11 +0,0 @@
// place your own credentials here for a vCenter or ESXi server
// this information will be used for connecting to a vCenter instance
// for module testing
// name the file config-test.js

export const vCenterTestCreds = {
  vCenterIP: 'vcsa',
  vCenterUser: 'vcuser',
  vCenterPassword: 'vcpw',
  vCenter: true,
}
@@ -1,138 +0,0 @@
/*
  vsphere-soap.test.js

  tests for the vCenterConnectionInstance class
*/

import assert from 'assert'
import { describe, it } from 'test'

import * as vc from '../lib/client.mjs'

// eslint-disable-next-line n/no-missing-import
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'

const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)

describe('Client object initialization:', function () {
  it('provides a successful login', { timeout: 5000 }, function (t, done) {
    VItest.once('ready', function () {
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    }).once('error', function (err) {
      console.error(err)
      // this should fail if there's a problem
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    })
  })
})

describe('Client reconnection test:', function () {
  it('can successfully reconnect', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('Logout', { _this: VItest.serviceContent.sessionManager })
      .once('result', function (result) {
        // now we're logged out, so let's try running a command to test automatic re-login
        VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' })
          .once('result', function (result) {
            assert(result.returnval instanceof Date)
            done()
          })
          .once('error', function (err) {
            console.error(err)
          })
      })
      .once('error', function (err) {
        console.error(err)
      })
  })
})

// these tests don't work yet
describe('Client tests - query commands:', function () {
  it('retrieves current time', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('retrieves current time 2 (check for event clobbering)', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('can obtain the names of all Virtual Machines in the inventory', { timeout: 20000 }, function (t, done) {
    // get property collector
    const propertyCollector = VItest.serviceContent.propertyCollector
    // get view manager
    const viewManager = VItest.serviceContent.viewManager
    // get root folder
    const rootFolder = VItest.serviceContent.rootFolder

    let containerView, objectSpec, traversalSpec, propertySpec, propertyFilterSpec
    // this is the equivalent to
    VItest.runCommand('CreateContainerView', {
      _this: viewManager,
      container: rootFolder,
      type: ['VirtualMachine'],
      recursive: true,
    }).once('result', function (result) {
      // build all the data structures needed to query all the vm names
      containerView = result.returnval

      objectSpec = {
        attributes: { 'xsi:type': 'ObjectSpec' }, // setting attributes xsi:type is important or else the server may mis-recognize types!
        obj: containerView,
        skip: true,
      }

      traversalSpec = {
        attributes: { 'xsi:type': 'TraversalSpec' },
        name: 'traverseEntities',
        type: 'ContainerView',
        path: 'view',
        skip: false,
      }

      objectSpec = { ...objectSpec, selectSet: [traversalSpec] }

      propertySpec = {
        attributes: { 'xsi:type': 'PropertySpec' },
        type: 'VirtualMachine',
        pathSet: ['name'],
      }

      propertyFilterSpec = {
        attributes: { 'xsi:type': 'PropertyFilterSpec' },
        propSet: [propertySpec],
        objectSet: [objectSpec],
      }
      // TODO: research why it fails if propSet is declared after objectSet

      VItest.runCommand('RetrievePropertiesEx', {
        _this: propertyCollector,
        specSet: [propertyFilterSpec],
        options: { attributes: { type: 'RetrieveOptions' } },
      })
        .once('result', function (result, raw) {
          assert.notEqual(result.returnval.objects, null)
          if (Array.isArray(result.returnval.objects)) {
            assert.strictEqual(result.returnval.objects[0].obj.attributes.type, 'VirtualMachine')
          } else {
            assert.strictEqual(result.returnval.objects.obj.attributes.type, 'VirtualMachine')
          }
          done()
        })
        .once('error', function (err) {
          console.error('\n\nlast request : ' + VItest.client.lastRequest, err)
        })
    })
  })
})
@@ -24,25 +24,3 @@ import { readChunkStrict } from '@vates/read-chunk'

const chunk = await readChunkStrict(stream, 1024)
```

### `skip(stream, size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
import { skip } from '@vates/read-chunk'

const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
```

### `skipStrict(stream, size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
import { skipStrict } from '@vates/read-chunk'

await skipStrict(stream, 2 * 1024 * 1024 * 1024)
```

@@ -43,28 +43,6 @@ import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```

### `skip(stream, size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
import { skip } from '@vates/read-chunk'

const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
```

### `skipStrict(stream, size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
import { skipStrict } from '@vates/read-chunk'

await skipStrict(stream, 2 * 1024 * 1024 * 1024)
```
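
Taken together, the strict helpers make it easy to consume fixed-size binary layouts. A minimal sketch (the 16-byte header and 512-byte record sizes are made up for illustration):

```js
import { readChunkStrict, skipStrict } from '@vates/read-chunk'

// skip a hypothetical 16-byte header, then read one hypothetical
// 512-byte record; both throw if the stream ends too early
async function readFirstRecord(stream) {
  await skipStrict(stream, 16)
  return await readChunkStrict(stream, 512)
}
```
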
## Contributions

Contributions are _very_ welcomed, either on the documentation or on

@@ -1,37 +1,18 @@
'use strict'

const assert = require('assert')
const isUtf8 = require('isutf8')

/**
 * Read a chunk of data from a stream.
 *
 * The returned promise is rejected if there is an error while reading the stream.
 *
 * For streams in object mode, the returned promise resolves to a single object read from the stream.
 *
 * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
 *
 * If `size` bytes are not available to be read, `null` will be returned *unless* the stream has ended, in which case all of the data remaining will be returned.
 *
 * @param {Readable} stream - A readable stream to read from.
 * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
 * @returns {Promise<Buffer|string|unknown|null>} - A Promise that resolves to the read chunk if available, or null if end of stream is reached.
 * @param {number} size - The number of bytes to read.
 * @returns {Promise<Buffer|null>} - A Promise that resolves to a Buffer of up to size bytes if available, or null if end of stream is reached. The Promise is rejected if there is an error while reading from the stream.
 */
const readChunk = (stream, size) =>
  stream.errored != null
    ? Promise.reject(stream.errored)
    : stream.closed || stream.readableEnded
  stream.closed || stream.readableEnded
    ? Promise.resolve(null)
    : size === 0
      ? Promise.resolve(Buffer.alloc(0))
      : new Promise((resolve, reject) => {
          if (size !== undefined) {
            assert(size > 0)

            // per Node documentation:
            // > The size argument must be less than or equal to 1 GiB.
            assert(size < 1073741824)
          }

          function onEnd() {
            resolve(null)
            removeListeners()
@@ -62,17 +43,9 @@ exports.readChunk = readChunk
/**
 * Read a chunk of data from a stream.
 *
 * The returned promise is rejected if there is an error while reading the stream.
 *
 * For streams in object mode, the returned promise resolves to a single object read from the stream.
 *
 * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
 *
 * If `size` bytes are not available to be read, the returned promise is rejected.
 *
 * @param {Readable} stream - A readable stream to read from.
 * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
 * @returns {Promise<Buffer|string|unknown>} - A Promise that resolves to the read chunk.
 * @param {number} size - The number of bytes to read.
 * @returns {Promise<Buffer>} - A Promise that resolves to a Buffer of size bytes. The Promise is rejected if there is an error while reading from the stream.
 */
exports.readChunkStrict = async function readChunkStrict(stream, size) {
  const chunk = await readChunk(stream, size)
@@ -81,14 +54,7 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
  }

  if (size !== undefined && chunk.length !== size) {
    const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)

    // Buffer.isUtf8 is too recent for now
    // @todo: replace the external package by Buffer.isUtf8 when the supported version of Node reaches 18

    if (chunk.length < 1024 && isUtf8(chunk)) {
      error.text = chunk.toString('utf8')
    }
    const error = new Error('stream has ended with not enough data')
    Object.defineProperties(error, {
      chunk: {
        value: chunk,
@@ -99,69 +65,3 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {

  return chunk
}

/**
 * Skips a given number of bytes from a readable stream.
 *
 * @param {Readable} stream - A readable stream to skip bytes from.
 * @param {number} size - The number of bytes to skip.
 * @returns {Promise<number>} A Promise that resolves to the number of bytes actually skipped. If the end of the stream is reached before all bytes are skipped, the Promise resolves to the number of bytes that were skipped before the end of the stream was reached. The Promise is rejected if there is an error while reading from the stream.
 */
async function skip(stream, size) {
  return stream.errored != null
    ? Promise.reject(stream.errored)
    : size === 0 || stream.closed || stream.readableEnded
      ? Promise.resolve(0)
      : new Promise((resolve, reject) => {
          let left = size
          function onEnd() {
            resolve(size - left)
            removeListeners()
          }
          function onError(error) {
            reject(error)
            removeListeners()
          }
          function onReadable() {
            const data = stream.read()
            left -= data === null ? 0 : data.length
            if (left > 0) {
              // continue to read
            } else {
              // if more than wanted has been read, push back the rest
              if (left < 0) {
                stream.unshift(data.slice(left))
              }

              resolve(size)
              removeListeners()
            }
          }
          function removeListeners() {
            stream.removeListener('end', onEnd)
            stream.removeListener('error', onError)
            stream.removeListener('readable', onReadable)
          }
          stream.on('end', onEnd)
          stream.on('error', onError)
          stream.on('readable', onReadable)
          onReadable()
        })
}
exports.skip = skip

/**
 * Skips a given number of bytes from a stream.
 *
 * @param {Readable} stream - A readable stream to skip bytes from.
 * @param {number} size - The number of bytes to skip.
 * @returns {Promise<void>} - A Promise that resolves when the exact number of bytes have been skipped. The Promise is rejected if there is an error while reading from the stream or the stream ends before the exact number of bytes have been skipped.
 */
exports.skipStrict = async function skipStrict(stream, size) {
  const bytesSkipped = await skip(stream, size)
  if (bytesSkipped !== size) {
    const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
    error.bytesSkipped = bytesSkipped
    throw error
  }
}

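The strict variants attach context to the errors they throw: `readChunkStrict` exposes the partial data as `error.chunk` (and, in the removed version, as `error.text` when it is short UTF-8), while `skipStrict` exposes `error.bytesSkipped`. A sketch of how a caller might exploit that (the `skipHeader` wrapper is hypothetical):

```js
const { skipStrict } = require('@vates/read-chunk')

async function skipHeader(stream, size) {
  try {
    await skipStrict(stream, size)
  } catch (error) {
    // `bytesSkipped` is set by skipStrict just before throwing (see above)
    console.warn(`stream ended early after ${error.bytesSkipped} bytes`)
    throw error
  }
}
```
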
@@ -5,58 +5,12 @@ const assert = require('node:assert').strict

const { Readable } = require('stream')

const { readChunk, readChunkStrict, skip, skipStrict } = require('./')
const { readChunk, readChunkStrict } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = fn => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(fn(stream, 10))
    stream.destroy(error)

    assert.strictEqual(await pError, error)
  })

  // only supported for Node >= 18
  if (process.versions.node.split('.')[0] >= 18) {
    it('rejects if the stream has already errored', async () => {
      const error = new Error()
      const stream = makeStream([])

      await new Promise(resolve => {
        stream.once('error', resolve).destroy(error)
      })

      assert.strictEqual(await rejectionOf(fn(stream, 10)), error)
    })
  }
}

describe('readChunk', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(readChunk(makeStream([]), 0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('rejects if size is greater than or equal to 1 GiB', async () => {
    const error = await rejectionOf(readChunk(makeStream([]), 1024 * 1024 * 1024))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  makeErrorTests(readChunk)

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await readChunk(makeStream([])), null)
  })
@@ -84,6 +38,10 @@ describe('readChunk', () => {
    it('returns less data if stream ends', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
    })

    it('returns an empty buffer if the specified size is 0', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
    })
  })

  describe('with object stream', () => {
@@ -94,6 +52,14 @@ describe('readChunk', () => {
  })
})

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

describe('readChunkStrict', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream([])))
@@ -102,86 +68,10 @@ describe('readChunkStrict', function () {
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data, utf8', async () => {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.strictEqual(error.text, 'foobar')
    assert.strictEqual(error.message, 'stream has ended with not enough data')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })

  it('throws if stream ends with not enough data, non utf8', async () => {
    const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
    const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, Buffer.concat(source))
  })

  it('throws if stream ends with not enough data, utf8, long data', async () => {
    const source = Buffer.from('a'.repeat(1500))
    const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
    assert(error instanceof Error)
    assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, source)
  })

  it('succeeds', async () => {
    const source = Buffer.from('a'.repeat(20))
    const chunk = await readChunkStrict(makeStream([source]), 10)
    assert.deepEqual(source.subarray(0, 10), chunk)
  })
})

describe('skip', function () {
  makeErrorTests(skip)

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await skip(makeStream(['foo']), 0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const stream = makeStream([])
    await readChunk(stream)

    assert.strictEqual(await skip(stream, 10), 0)
  })

  it('skips a number of bytes', async () => {
    const stream = makeStream('foo bar')

    assert.strictEqual(await skip(stream, 4), 4)
    assert.deepEqual(await readChunk(stream, 4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
  })

  it('puts back if it read too much', async () => {
    let source = makeStream(['foo', 'bar'])
    await skip(source, 1) // read part of a data chunk
    const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
    assert.strictEqual(chunk, 'oo')

    source = makeStream(['foo', 'bar'])
    assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
  })
})

describe('skipStrict', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(skipStrict(makeStream('foo bar'), 10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })

  it('succeeds', async () => {
    const source = makeStream(['foo', 'bar', 'baz'])
    const res = await skipStrict(source, 4)
    assert.strictEqual(res, undefined)
  })
})

@@ -19,7 +19,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "1.2.0",
  "version": "1.0.1",
  "engines": {
    "node": ">=8.10"
  },
@@ -33,8 +33,5 @@
  },
  "devDependencies": {
    "test": "^3.2.1"
  },
  "dependencies": {
    "isutf8": "^4.0.0"
  }
}

@@ -1,42 +0,0 @@
```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,75 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/stream-reader

[](https://npmjs.org/package/@vates/stream-reader)  [](https://bundlephobia.com/result?p=@vates/stream-reader) [](https://npmjs.org/package/@vates/stream-reader)

> Efficiently reads and skips chunks of a given size in a stream

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/stream-reader):

```sh
npm install --save @vates/stream-reader
```

## Usage

```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```

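The implementation shown further down in this diff also defines `Symbol.asyncIterator`, so a reader can be drained with `for await`; this is not documented in the README above, the sketch below simply mirrors that code:

```js
for await (const chunk of reader) {
  // chunks are yielded until `.read()` returns null, i.e. until the stream ends
  console.log(chunk.length)
}
```
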
## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,123 +0,0 @@
'use strict'

const assert = require('node:assert')
const { finished, Readable } = require('node:stream')

const noop = Function.prototype

// Inspired by https://github.com/nodejs/node/blob/85705a47958c9ae5dbaa1f57456db19bdefdc494/lib/internal/streams/readable.js#L1107
class StreamReader {
  #ended = false
  #error
  #executor = resolve => {
    this.#resolve = resolve
  }
  #stream
  #resolve = noop

  constructor(stream) {
    stream = typeof stream.pipe === 'function' ? stream : Readable.from(stream)

    this.#stream = stream

    stream.on('readable', () => this.#resolve())

    finished(stream, { writable: false }, error => {
      this.#error = error
      this.#ended = true
      this.#resolve()
    })
  }

  async read(size) {
    if (size !== undefined) {
      assert(size > 0)
    }

    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return null
      }

      const value = this.#stream.read(size)
      if (value !== null) {
        return value
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async readStrict(size) {
    const chunk = await this.read(size)
    if (chunk === null) {
      throw new Error('stream has ended without data')
    }

    if (size !== undefined && chunk.length !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
      Object.defineProperties(error, {
        chunk: {
          value: chunk,
        },
      })
      throw error
    }

    return chunk
  }

  async skip(size) {
    if (size === 0) {
      return size
    }

    let toSkip = size
    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return size - toSkip
      }

      const data = this.#stream.read()
      if (data !== null) {
        toSkip -= data.length
        if (toSkip > 0) {
          // continue to read
        } else {
          // if more than wanted has been read, push back the rest
          if (toSkip < 0) {
            this.#stream.unshift(data.slice(toSkip))
          }

          return size
        }
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async skipStrict(size) {
    const bytesSkipped = await this.skip(size)
    if (bytesSkipped !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
      error.bytesSkipped = bytesSkipped
      throw error
    }
  }
}

StreamReader.prototype[Symbol.asyncIterator] = async function* asyncIterator() {
  let chunk
  while ((chunk = await this.read()) !== null) {
    yield chunk
  }
}

module.exports = StreamReader
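
Note that the constructor above falls back to `Readable.from()` for any argument without a `pipe` method, so a `StreamReader` can be built directly from an (async) iterable. A small sketch; keep in mind that `Readable.from()` defaults to object mode, so unsized reads returning one chunk at a time are the appropriate way to consume it:

```js
const StreamReader = require('./index.js')

async function* generate() {
  yield Buffer.from('foo')
  yield Buffer.from('bar')
}

async function main() {
  const reader = new StreamReader(generate())
  console.log(await reader.read()) // <Buffer 66 6f 6f>
  console.log(await reader.read()) // <Buffer 62 61 72>
  console.log(await reader.read()) // null — stream has ended
}
main()
```
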
@@ -1,141 +0,0 @@
'use strict'

const { describe, it } = require('test')
const assert = require('node:assert').strict

const { Readable } = require('stream')

const StreamReader = require('./index.js')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = method => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(new StreamReader(stream)[method](10))
    stream.destroy(error)

    assert.strictEqual(await pError, error)
  })

  it('rejects if the stream has already errored', async () => {
    const error = new Error()
    const stream = makeStream([])

    await new Promise(resolve => {
      stream.once('error', resolve).destroy(error)
    })

    assert.strictEqual(await rejectionOf(new StreamReader(stream)[method](10)), error)
  })
}

describe('read()', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).read(0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await new StreamReader(makeStream([])).read(), null)
  })

  makeErrorTests('read')

  it('returns null if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.read(), null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(), Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(2), Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(4), Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(10), Buffer.from('foobar'))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      assert.strictEqual(await new StreamReader(makeStream.obj(chunks)).read(), chunks[0])
    })
  })
})

describe('readStrict()', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).readStrict())
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended without data')
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream(['foo', 'bar'])).readStrict(10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })
})

describe('skip()', function () {
  makeErrorTests('skip')

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await new StreamReader(makeStream(['foo'])).skip(0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.skip(10), 0)
  })

  it('skips a number of bytes', async () => {
    const reader = new StreamReader(makeStream('foo bar'))

    assert.strictEqual(await reader.skip(4), 4)
    assert.deepEqual(await reader.read(4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await new StreamReader(makeStream('foo bar')).skip(10), 7)
  })
})

describe('skipStrict()', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream('foo bar')).skipStrict(10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
})
@@ -1,39 +0,0 @@
{
  "private": false,
  "name": "@vates/stream-reader",
  "description": "Efficiently reads and skips chunks of a given size in a stream",
  "keywords": [
    "async",
    "chunk",
    "data",
    "node",
    "promise",
    "read",
    "reader",
    "skip",
    "stream"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/stream-reader",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/stream-reader",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=10"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "devDependencies": {
    "test": "^3.3.0"
  }
}
@@ -2,10 +2,7 @@
import { Task } from '@vates/task'

const task = new Task({
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },
  name: 'my task',

  // if defined, a new detached task is created
  //
@@ -14,15 +11,13 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId, properties } = event
      const { name, parentId } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -30,18 +25,8 @@ const task = new Task({
// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
task.status

// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)
await task.abort()

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
@@ -49,11 +34,7 @@ const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise `undefined`
Task.abortSignal

@@ -71,67 +52,3 @@ Task.warning(message, data)
// - progress
Task.set(property, value)
```

### `combineEvents`

Create a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```

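For instance, a root task with one subtask produces a nested log: subtask logs are collected in the parent's `tasks` array, as the `combineEvents` implementation further down in this diff shows. A sketch using the `properties` form of the constructor documented above (a subtask created without an explicit `onProgress` attaches to the currently running task):

```js
import { Task } from '@vates/task'
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  onRootTaskEnd(taskLog) {
    console.log(taskLog.properties.name, taskLog.status) // parent success
    console.log(taskLog.tasks[0].properties.name) // child
  },
})

await new Task({ properties: { name: 'parent' }, onProgress }).run(async () => {
  await new Task({ properties: { name: 'child' } }).run(() => 'done')
})
```
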
@@ -18,10 +18,7 @@ npm install --save @vates/task
import { Task } from '@vates/task'

const task = new Task({
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },
  name: 'my task',

  // if defined, a new detached task is created
  //
@@ -30,15 +27,13 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId, properties } = event
      const { name, parentId } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -46,18 +41,8 @@ const task = new Task({
|
||||
// this field is settable once before being observed
|
||||
task.id
|
||||
|
||||
// contains the current status of the task
|
||||
//
|
||||
// possible statuses are:
|
||||
// - pending
|
||||
// - success
|
||||
// - failure
|
||||
task.status
|
||||
|
||||
// Triggers the abort signal associated to the task.
|
||||
//
|
||||
// This simply requests the task to abort, it will be up to the task to handle or not this signal.
|
||||
task.abort(reason)
|
||||
await task.abort()
|
||||
|
||||
// if fn rejects, the task will be marked as failed
|
||||
const result = await task.runInside(fn)
|
||||
@@ -65,11 +50,7 @@ const result = await task.runInside(fn)
|
||||
// if fn rejects, the task will be marked as failed
|
||||
// if fn resolves, the task will be marked as succeeded
|
||||
const result = await task.run(fn)
|
||||
```
|
||||
|
||||
Inside a task:
|
||||
|
||||
```js
|
||||
// the abort signal of the current task if any, otherwise is `undefined`
|
||||
Task.abortSignal
|
||||
|
||||
@@ -88,70 +69,6 @@ Task.warning(message, data)
Task.set(property, value)
```

### `combineEvents`

Create a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on

@@ -1,61 +0,0 @@
'use strict'

const assert = require('node:assert').strict

const noop = Function.prototype

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = {
        id,
        properties: { __proto__: null, ...event.properties },
        start: event.timestamp,
        status: 'pending',
      }
      taskLogs.set(id, taskLog)

      const { parentId } = event
      if (parentId === undefined) {
        Object.defineProperty(taskLog, '$root', { value: taskLog })

        // start of a root task
        onRootTaskStart(taskLog)
      } else {
        // start of a subtask
        const parent = taskLogs.get(parentId)
        assert.notEqual(parent, undefined)

        // inject a (non-enumerable) reference to the parent and the root task
        Object.defineProperties(taskLog, { $parent: { value: parent }, $root: { value: parent.$root } })
        ;(parent.tasks ?? (parent.tasks = [])).push(taskLog)
      }
    } else {
      taskLog = taskLogs.get(id)
      assert.notEqual(taskLog, undefined)

      if (type === 'info' || type === 'warning') {
        const key = type + 's'
        const { data, message } = event
        ;(taskLog[key] ?? (taskLog[key] = [])).push({ data, message })
      } else if (type === 'property') {
        ;(taskLog.properties ?? (taskLog.properties = { __proto__: null }))[event.name] = event.value
      } else if (type === 'end') {
        taskLog.end = event.timestamp
        taskLog.result = event.result
        taskLog.status = event.status
      } else if (type === 'abortionRequested') {
        taskLog.abortionRequestedAt = event.timestamp
      }

      if (type === 'end' && taskLog.$root === taskLog) {
        onRootTaskEnd(taskLog)
      }
    }

    onTaskUpdate(taskLog)
  }
}
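
Since `onProgress` only inspects plain event objects, a consolidated log can also be rebuilt from stored events. A sketch with hand-built events (the ids and timestamps are made up):

```js
const { makeOnProgress } = require('./combineEvents.js')

const onProgress = makeOnProgress({
  onRootTaskEnd(taskLog) {
    console.log(taskLog.status, taskLog.result) // success 42
  },
})

// replay two stored events describing a single root task
onProgress({ type: 'start', id: 't1', timestamp: 0, properties: { name: 'replayed' } })
onProgress({ type: 'end', id: 't1', timestamp: 10, status: 'success', result: 42 })
```
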
@@ -1,81 +0,0 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { makeOnProgress } = require('./combineEvents.js')
const { Task } = require('./index.js')

describe('makeOnProgress()', function () {
  it('works', async function () {
    const events = []
    let log
    const task = new Task({
      properties: { name: 'task' },
      onProgress: makeOnProgress({
        onRootTaskStart(log_) {
          assert.equal(log, undefined)
          log = log_
          events.push('onRootTaskStart')
        },
        onRootTaskEnd(log_) {
          assert.equal(log_, log)
          events.push('onRootTaskEnd')
        },

        onTaskUpdate(log_) {
          assert.equal(log_.$root, log)
          events.push('onTaskUpdate')
        },
      }),
    })

    assert.equal(events.length, 0)

    let i = 0

    await task.run(async () => {
      assert.equal(events[i++], 'onRootTaskStart')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.id, task.id)
      assert.equal(log.properties.name, 'task')
      assert(Math.abs(log.start - Date.now()) < 10)

      Task.set('name', 'new name')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.name, 'new name')

      Task.set('progress', 0)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 0)

      Task.info('foo', {})
      assert.equal(events[i++], 'onTaskUpdate')
      assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])

      const subtask = new Task({ properties: { name: 'subtask' } })
      await subtask.run(() => {
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(log.tasks[0].properties.name, 'subtask')

        Task.warning('bar', {})
        assert.equal(events[i++], 'onTaskUpdate')
        assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])

        subtask.abort()
        assert.equal(events[i++], 'onTaskUpdate')
        assert(Math.abs(log.tasks[0].abortionRequestedAt - Date.now()) < 10)
      })
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.tasks[0].status, 'success')

      Task.set('progress', 100)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 100)
    })
    assert.equal(events[i++], 'onRootTaskEnd')
    assert.equal(events[i++], 'onTaskUpdate')
    assert(Math.abs(log.end - Date.now()) < 10)
    assert.equal(log.status, 'success')
  })
})
@@ -10,15 +10,14 @@ function define(object, property, value) {

const noop = Function.prototype

const ABORTED = 'aborted'
const ABORTING = 'aborting'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
const asyncStorage = global[asyncStorageKey] ?? (global[asyncStorageKey] = new AsyncLocalStorage())
exports.STATUS = { ABORTED, ABORTING, FAILURE, PENDING, SUCCESS }

const asyncStorage = new AsyncLocalStorage()
const getTask = () => asyncStorage.getStore()

exports.Task = class Task {
@@ -67,6 +66,7 @@ exports.Task = class Task {

  #abortController = new AbortController()
  #onProgress
  #parent

  get id() {
    return (this.id = Math.random().toString(36).slice(2))
@@ -82,14 +82,16 @@ exports.Task = class Task {
    return this.#status
  }

  constructor({ properties, onProgress } = {}) {
    this.#startData = { properties }
  constructor({ name, onProgress }) {
    this.#startData = { name }

    if (onProgress !== undefined) {
      this.#onProgress = onProgress
    } else {
      const parent = getTask()
      if (parent !== undefined) {
        this.#parent = parent

        const { signal } = parent.#abortController
        signal.addEventListener('abort', () => {
          this.#abortController.abort(signal.reason)
@@ -105,15 +107,7 @@ exports.Task = class Task {
    const { signal } = this.#abortController
    signal.addEventListener('abort', () => {
      if (this.status === PENDING) {
        this.#maybeStart()

        this.#emit('abortionRequested', { reason: signal.reason })

        if (!this.#running) {
          const status = FAILURE
          this.#status = status
          this.#emit('end', { result: signal.reason, status })
        }
        this.#status = this.#running ? ABORTING : ABORTED
      }
    })
  }
@@ -129,12 +123,14 @@ exports.Task = class Task {
    this.#onProgress(data)
  }

  #maybeStart() {
    const startData = this.#startData
    if (startData !== undefined) {
      this.#startData = undefined
      this.#emit('start', startData)
  #handleMaybeAbortion(result) {
    if (this.status === ABORTING) {
      this.#status = ABORTED
      this.#emit('end', { status: ABORTED, result })
      return true
    }

    return false
  }

  async run(fn) {
@@ -152,17 +148,22 @@ exports.Task = class Task {
    assert.equal(this.#running, false)
    this.#running = true

    this.#maybeStart()
    const startData = this.#startData
    if (startData !== undefined) {
      this.#startData = undefined
      this.#emit('start', startData)
    }

    try {
      const result = await asyncStorage.run(this, fn)
      this.#handleMaybeAbortion(result)
      this.#running = false
      return result
    } catch (result) {
      const status = FAILURE

      this.#status = status
      this.#emit('end', { status, result })
      if (!this.#handleMaybeAbortion(result)) {
        this.#status = FAILURE
        this.#emit('end', { status: FAILURE, result })
      }
      throw result
    }
  }

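With the two new statuses introduced by this hunk, aborting a running task now ends it as `aborted` instead of `failure`. A sketch of the resulting transitions, assuming the parts of the class not shown in these hunks are unchanged (note the new constructor takes `name` instead of `properties`):

```js
const { Task } = require('./index.js')

const task = new Task({ name: 'abortable', onProgress: () => {} })

task
  .run(async () => {
    task.abort('stop requested')
    // the abort signal has fired but fn is still running
    console.log(task.status) // aborting
    Task.abortSignal.throwIfAborted()
  })
  .catch(() => {
    console.log(task.status) // aborted
  })
```
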
@@ -1,347 +0,0 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { Task } = require('./index.js')

const noop = Function.prototype

function assertEvent(task, expected, eventIndex = -1) {
  const logs = task.$events
  const actual = logs[eventIndex < 0 ? logs.length + eventIndex : eventIndex]

  assert.equal(typeof actual, 'object')
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const key of Object.keys(expected)) {
    assert.deepEqual(actual[key], expected[key])
  }
}

// like new Task() but with a custom onProgress which adds events to task.$events
function createTask(opts) {
  const events = []
  const task = new Task({ ...opts, onProgress: events.push.bind(events) })
  task.$events = events
  return task
}

describe('Task', function () {
  describe('constructor', function () {
    it('data properties are passed to the start event', async function () {
      const properties = { foo: 0, bar: 1 }
      const task = createTask({ properties })
      await task.run(noop)
      assertEvent(task, { type: 'start', properties }, 0)
    })
  })

  it('subtask events are passed to the root task', async function () {
    const task = createTask()
    const result = {}

    await task.run(async () => {
      await new Task().run(() => result)
    })

    assert.equal(task.$events.length, 4)
    assertEvent(task, { type: 'start', parentId: task.id }, 1)
    assertEvent(task, { type: 'end', status: 'success', result }, 2)
  })

  describe('.abortSignal', function () {
    it('is undefined when run outside a task', function () {
      assert.equal(Task.abortSignal, undefined)
    })

    it('is the current abort signal when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        const { abortSignal } = Task
        assert.equal(abortSignal.aborted, false)
        task.abort()
        assert.equal(abortSignal.aborted, true)
      })
    })
  })

  describe('.abort()', function () {
    it('aborts if the task fails with the abort reason', async function () {
      const task = createTask()
      const reason = {}

      await task
        .run(() => {
          task.abort(reason)

          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const reason = {}
      const result = new Error()

      await task
        .run(() => {
          task.abort(reason)

          throw result
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result }, 2)
    })

    it('does not abort if the task succeeds', async function () {
      const task = createTask()
      const reason = {}
      const result = {}

      await task
        .run(() => {
          task.abort(reason)

          return result
        })
        .catch(noop)

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'success', result }, 2)
    })

    it('aborts before task is running', function () {
      const task = createTask()
      const reason = {}

      task.abort(reason)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })
  })

  describe('.info()', function () {
    it('does nothing when run outside a task', function () {
      Task.info('foo')
    })

    it('emits an info message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.info('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'info',
        })
      })
    })
  })

  describe('.set()', function () {
    it('does nothing when run outside a task', function () {
      Task.set('progress', 10)
    })

    it('emits a property event when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.set('progress', 10)
        assertEvent(task, {
          name: 'progress',
          type: 'property',
          value: 10,
        })
      })
    })
  })

  describe('.warning()', function () {
    it('does nothing when run outside a task', function () {
      Task.warning('foo')
    })

    it('emits a warning message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.warning('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'warning',
        })
      })
    })
  })

  describe('#id', function () {
    it('can be set', function () {
      const task = createTask()
      task.id = 'foo'
      assert.equal(task.id, 'foo')
    })

    it('cannot be set more than once', function () {
      const task = createTask()
      task.id = 'foo'

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })

    it('is randomly generated if not set', function () {
      assert.notEqual(createTask().id, createTask().id)
    })

    it('cannot be set after being observed', function () {
      const task = createTask()
      noop(task.id)

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })
  })

describe('#status', function () {
|
||||
it('starts as pending', function () {
|
||||
assert.equal(createTask().status, 'pending')
|
||||
})
|
||||
|
||||
    it('changes to success when it finishes without error', async function () {
      const task = createTask()
      await task.run(noop)
      assert.equal(task.status, 'success')
    })

    it('changes to failure when it finishes with an error', async function () {
      const task = createTask()
      await task
        .run(() => {
          throw Error()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
          task.abort()
          assert.equal(task.status, 'pending')
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted when not running', function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'failure')
    })
  })

  function makeRunTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('finishes the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'success')
      assertEvent(task, {
        status: 'success',
        result: 'foo',
        type: 'end',
      })
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.run', function () {
    makeRunTests((task, fn) => task.run(fn))
  })
  describe('.wrap', function () {
    makeRunTests((task, fn) => task.wrap(fn)())
  })

  function makeRunInsideTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('does not finish the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'pending')
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.runInside', function () {
    makeRunInsideTests((task, fn) => task.runInside(fn))
  })
  describe('.wrapInside', function () {
    makeRunInsideTests((task, fn) => task.wrapInside(fn)())
  })
})
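Together, these tests pin down the `Task` contract: `run`/`wrap` emit `start` and always settle the task with an `end` event, while `runInside`/`wrapInside` leave a successful task `pending` and only end it on failure. The `createTask` and `assertEvent` helpers are defined earlier in the test file and not shown in this excerpt; a minimal version consistent with the assertions might look like this (the `onProgress` option name and the import path are assumptions):

```js
import assert from 'node:assert/strict'

import { Task } from '@vates/task' // import path assumed

function createTask(opts) {
  const events = []
  // collect every event the task emits so tests can inspect them later
  const task = new Task({ ...opts, onProgress: event => events.push(event) }) // option name assumed
  task.$events = events
  return task
}

function assertEvent(task, expected, index = task.$events.length - 1) {
  const event = task.$events[index]
  for (const [key, value] of Object.entries(expected)) {
    assert.deepEqual(event[key], value)
  }
}
```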
@@ -13,19 +13,11 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.2.0",
  "version": "0.0.1",
  "engines": {
    "node": ">=14"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "exports": {
    ".": "./index.js",
    "./combineEvents": "./combineEvents.js"
    "postversion": "npm publish --access public"
  }
}

@@ -1,5 +1,5 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'

@@ -7,9 +7,9 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.40.0",
    "@xen-orchestra/fs": "^4.0.1",
    "filenamify": "^6.0.0",
    "@xen-orchestra/backups": "^0.32.0",
    "@xen-orchestra/fs": "^3.3.2",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.21.0"
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "1.0.10",
  "version": "1.0.2",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",

295
@xen-orchestra/backups/Backup.js
Normal file
@@ -0,0 +1,295 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const createStreamThrottle = require('./_createStreamThrottle.js')

const noop = Function.prototype

const getAdaptersByRemote = adapters => {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}

const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

const DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

const DEFAULT_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  vmTimeout: 0,
}

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}

exports.Backup = class Backup {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const { type } = job
    const baseSettings = { ...DEFAULT_SETTINGS }
    if (type === 'backup') {
      Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
      this.run = this._runVmBackup
    } else if (type === 'metadataBackup') {
      Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
      this.run = this._runMetadataBackup
    } else {
      throw new Error(`No runner for the backup type ${type}`)
    }
    Object.assign(baseSettings, job.settings[''])

    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }

    const { getRemoteTimeout } = this._settings
    this._getAdapter = async function (remoteId) {
      try {
        const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))

        return new Disposable(() => disposable.dispose(), {
          adapter: disposable.value,
          remoteId,
        })
      } catch (error) {
        // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
        runTask(
          {
            name: 'get remote adapter',
            data: { type: 'remote', id: remoteId },
          },
          () => Promise.reject(error)
        )
      }
    }
  }

  async _runMetadataBackup() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(remoteIds.map(id => this._getAdapter(id))),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }

  async _runVmBackup() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid =>
          runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
            Disposable.use(this._getRecord('VM', vmUuid), vm =>
              new VmBackup({
                baseSettings,
                config,
                getSnapshotNameLabel,
                healthCheckSr,
                job,
                remoteAdapters,
                schedule,
                settings: { ...settings, ...allSettings[vm.uuid] },
                srs,
                throttleStream,
                vm,
              }).run()
            )
          )
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}

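A note on how the settings above compose: the built-in defaults are overridden by the type-specific defaults, then by the config's defaults, then by the job-wide settings (`job.settings['']`), and finally by the schedule-specific entry. A toy illustration of that precedence (the values are made up):

```js
const DEFAULT_SETTINGS = { reportWhen: 'failure', getRemoteTimeout: 300e3 }
const config = { defaultSettings: { reportWhen: 'always' } }
const job = {
  settings: {
    '': { concurrency: 4 }, // applies to the whole job
    'schedule-1': { concurrency: 1 }, // applies to one schedule only
  },
}

const baseSettings = { ...DEFAULT_SETTINGS, ...config.defaultSettings, ...job.settings[''] }
const settings = { ...baseSettings, ...job.settings['schedule-1'] }
console.log(settings)
// → { reportWhen: 'always', getRemoteTimeout: 300000, concurrency: 1 }
```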
@@ -1,17 +0,0 @@
import { Metadata } from './_runners/Metadata.mjs'
import { VmsRemote } from './_runners/VmsRemote.mjs'
import { VmsXapi } from './_runners/VmsXapi.mjs'

export function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'mirrorBackup':
      return new VmsRemote(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
}
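The deleted `createRunner` factory replaced the `Backup` class's internal dispatch with one runner class per job type. Calling code was expected to look roughly like this (a sketch only; the import path is assumed and the option names mirror the `Backup` constructor above):

```js
import { createRunner } from '@xen-orchestra/backups/createRunner.mjs' // path assumed

async function runJob({ config, getAdapter, getConnectedRecord, job, schedule }) {
  const runner = createRunner({ config, getAdapter, getConnectedRecord, job, schedule })
  return runner.run()
}
```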
@@ -1,6 +1,8 @@
import { asyncMap } from '@xen-orchestra/async-map'
'use strict'

export class DurablePartition {
const { asyncMap } = require('@xen-orchestra/async-map')

exports.DurablePartition = class DurablePartition {
  // private resource API is used exceptionally to be able to separate resource creation and release
  #partitionDisposers = {}

64
@xen-orchestra/backups/HealthCheckVmBackup.js
Normal file
@@ -0,0 +1,64 @@
'use strict'

const { Task } = require('./Task')

exports.HealthCheckVmBackup = class HealthCheckVmBackup {
  #xapi
  #restoredVm

  constructor({ restoredVm, xapi }) {
    this.#restoredVm = restoredVm
    this.#xapi = xapi
  }

  async run() {
    return Task.run(
      {
        name: 'vmstart',
      },
      async () => {
        let restoredVm = this.#restoredVm
        const xapi = this.#xapi
        const restoredId = restoredVm.uuid

        // remove vifs
        await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))

        const start = new Date()
        // start Vm

        await xapi.callAsync(
          'VM.start',
          restoredVm.$ref,
          false, // Start paused?
          false // Skip pre-boot checks?
        )
        const started = new Date()
        const timeout = 10 * 60 * 1000
        const startDuration = started - start

        let remainingTimeout = timeout - startDuration

        if (remainingTimeout < 0) {
          throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
        }

        // wait for the 'Running' event to be really stored in local xapi object cache
        restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
          timeout: remainingTimeout,
        })

        const running = new Date()
        remainingTimeout -= running - started

        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
        }
        // wait for the guest tool version to be defined
        await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
          timeout: remainingTimeout,
        })
      }
    )
  }
}

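The health check spreads one overall time budget across the successive waits, subtracting the elapsed time after each step. The same accounting pattern in isolation (`steps` and `waitFor` are hypothetical stand-ins for the XAPI waits above):

```js
async function runWithTimeBudget(steps, budget) {
  let remaining = budget
  for (const [name, waitFor] of steps) {
    const start = Date.now()
    await waitFor(remaining) // each step gets only what is left of the budget
    remaining -= Date.now() - start
    if (remaining < 0) {
      throw new Error(`time budget exhausted after step ${name}`)
    }
  }
}
```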
@@ -1,109 +0,0 @@
import { Task } from './Task.mjs'

export class HealthCheckVmBackup {
  #restoredVm
  #timeout
  #xapi

  constructor({ restoredVm, timeout = 10 * 60 * 1000, xapi }) {
    this.#restoredVm = restoredVm
    this.#xapi = xapi
    this.#timeout = timeout
  }

  async run() {
    return Task.run(
      {
        name: 'vmstart',
      },
      async () => {
        let restoredVm = this.#restoredVm
        const xapi = this.#xapi
        const restoredId = restoredVm.uuid

        // remove vifs
        await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
        const waitForScript = restoredVm.tags.includes('xo-backup-health-check-xenstore')
        if (waitForScript) {
          await restoredVm.set_xenstore_data({
            'vm-data/xo-backup-health-check': 'planned',
          })
        }
        const start = new Date()
        // start Vm

        await xapi.callAsync(
          'VM.start',
          restoredVm.$ref,
          false, // Start paused?
          false // Skip pre-boot checks?
        )
        const started = new Date()
        const timeout = this.#timeout
        const startDuration = started - start

        let remainingTimeout = timeout - startDuration

        if (remainingTimeout < 0) {
          throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
        }

        // wait for the 'Running' event to be really stored in local xapi object cache
        restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
          timeout: remainingTimeout,
        })

        const running = new Date()
        remainingTimeout -= running - started

        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
        }
        // wait for the guest tool version to be defined
        await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
          timeout: remainingTimeout,
        })

        const guestToolsReady = new Date()
        remainingTimeout -= guestToolsReady - running
        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get the guest tools check ${restoredId} after ${timeout / 1000} seconds`)
        }

        if (waitForScript) {
          const startedRestoredVm = await xapi.waitObjectState(
            restoredVm.$ref,
            vm =>
              vm?.xenstore_data !== undefined &&
              (vm.xenstore_data['vm-data/xo-backup-health-check'] === 'success' ||
                vm.xenstore_data['vm-data/xo-backup-health-check'] === 'failure'),
            {
              timeout: remainingTimeout,
            }
          )
          const scriptOk = new Date()
          remainingTimeout -= scriptOk - guestToolsReady
          if (remainingTimeout < 0) {
            throw new Error(
              `Backup health check script did not update vm-data/xo-backup-health-check of ${restoredId} after ${
                timeout / 1000
              } seconds, got ${
                startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check']
              } instead of 'success' or 'failure'`
            )
          }

          if (startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check'] !== 'success') {
            const message = startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check-error']
            if (message) {
              throw new Error(`Backup health check script failed with message ${message} for VM ${restoredId} `)
            } else {
              throw new Error(`Backup health check script failed for VM ${restoredId} `)
            }
          }
          Task.info('Backup health check script successfully executed')
        }
      }
    )
  }
}
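Compared with the CommonJS version above, this removed ESM variant adds a configurable timeout and an optional xenstore handshake driven by the `xo-backup-health-check-xenstore` tag. Invoking it might have looked like this (a sketch; `xapi` and `restoredVm` come from the surrounding backup code):

```js
import { HealthCheckVmBackup } from './HealthCheckVmBackup.mjs' // path assumed

async function healthCheck(xapi, restoredVm) {
  await new HealthCheckVmBackup({
    restoredVm, // XAPI record of the VM restored on the health-check SR
    timeout: 5 * 60 * 1000, // override the default 10-minute budget
    xapi, // connected XAPI client
  }).run()
}
```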
@@ -1,14 +1,16 @@
import assert from 'node:assert'
'use strict'

import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
const assert = require('assert')

export class ImportVmBackup {
const { formatFilenameDate } = require('./_filenameDate.js')
const { importDeltaVm } = require('./_deltaVm.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

exports.ImportVmBackup = class ImportVmBackup {
  constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
    this._adapter = adapter
    this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
    this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
@@ -29,11 +31,11 @@ export class ImportVmBackup {
    assert.strictEqual(metadata.mode, 'delta')

    const ignoredVdis = new Set(
      Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
      Object.entries(this._importDeltaVmSettings.mapVdisSrs)
        .filter(([_, srUuid]) => srUuid === null)
        .map(([vdiUuid]) => vdiUuid)
    )
    backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
    backup = await adapter.readDeltaVmBackup(metadata, ignoredVdis)
    Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
  }

@@ -47,8 +49,8 @@ export class ImportVmBackup {

    const vmRef = isFull
      ? await xapi.VM_import(backup, srRef)
      : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
          ...this._importIncrementalVmSettings,
      : await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
          ...this._importDeltaVmSettings,
          detectBase: false,
        })

@@ -1,39 +1,51 @@
import { asyncEach } from '@vates/async-each'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
import { execFile } from 'child_process'
import { mount } from '@vates/fuse-vhd'
import { readdir, lstat } from 'node:fs/promises'
import { synchronized } from 'decorator-synchronized'
import { v4 as uuidv4 } from 'uuid'
import { ZipFile } from 'yazl'
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import groupBy from 'lodash/groupBy.js'
import pDefer from 'promise-toolbox/defer'
import pickBy from 'lodash/pickBy.js'
import tar from 'tar'
import zlib from 'zlib'
'use strict'

import { BACKUP_DIR } from './_getVmBackupDir.mjs'
import { cleanVm } from './_cleanVm.mjs'
import { formatFilenameDate } from './_filenameDate.mjs'
import { getTmpDir } from './_getTmpDir.mjs'
import { isMetadataFile } from './_backupType.mjs'
import { isValidXva } from './_isValidXva.mjs'
import { listPartitions, LVM_PARTITION_TYPE } from './_listPartitions.mjs'
import { lvs, pvs } from './_lvm.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const {
  createVhdDirectoryFromStream,
  createVhdStreamWithLength,
  openVhd,
  VhdAbstract,
  VhdDirectory,
  VhdSynthetic,
} = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')

export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
const { strictEqual } = require('assert')

export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS

const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS

const { debug, warn } = createLogger('xo:backups:RemoteAdapter')

@@ -42,23 +54,20 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const noop = Function.prototype

const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))

async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
  for (const relativePath of relativePaths) {
    const realPath = join(realBasePath, relativePath)
    const virtualPath = join(virtualBasePath, relativePath)
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)

    const stats = await lstat(realPath)
    const { mode, mtime } = stats
    const opts = { mode, mtime }
    if (stats.isDirectory()) {
      zip.addEmptyDirectory(virtualPath, opts)
      await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
    } else if (stats.isFile()) {
      zip.addFile(realPath, virtualPath, opts)
    }
async function addDirectory(files, realPath, metadataPath) {
  const stats = await lstat(realPath)
  if (stats.isDirectory()) {
    await asyncMap(await readdir(realPath), file =>
      addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
    )
  } else if (stats.isFile()) {
    files.push({
      realPath,
      metadataPath,
    })
  }
}

@@ -75,7 +84,7 @@ const debounceResourceFactory = factory =>
  return this._debounceResource(factory.apply(this, arguments))
}

export class RemoteAdapter {
class RemoteAdapter {
  constructor(
    handler,
    { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
@@ -186,6 +195,17 @@ export class RemoteAdapter {
    })
  }

  async *_usePartitionFiles(diskId, partitionId, paths) {
    const path = yield this.getPartition(diskId, partitionId)

    const files = []
    await asyncMap(paths, file =>
      addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
    )

    return files
  }

  // check if we will be allowed to merge a vhd created in this adapter
  // with the vhd at path `path`
  async isMergeableParent(packedParentUid, path) {
@@ -202,24 +222,15 @@ export class RemoteAdapter {
    })
  }

  fetchPartitionFiles(diskId, partitionId, paths, format) {
  fetchPartitionFiles(diskId, partitionId, paths) {
    const { promise, reject, resolve } = pDefer()
    Disposable.use(
      async function* () {
        const path = yield this.getPartition(diskId, partitionId)
        let outputStream

        if (format === 'tgz') {
          outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
        } else if (format === 'zip') {
          const zip = new ZipFile()
          await addZipEntries(zip, path, '', paths.map(makeRelative))
          zip.end()
          ;({ outputStream } = zip)
        } else {
          throw new Error('unsupported format ' + format)
        }

        const files = yield this._usePartitionFiles(diskId, partitionId, paths)
        const zip = new ZipFile()
        files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
        zip.end()
        const { outputStream } = zip
        resolve(outputStream)
        await fromEvent(outputStream, 'end')
      }.bind(this)
@@ -330,7 +341,7 @@ export class RemoteAdapter {
    const RE_VHDI = /^vhdi(\d+)$/
    const handler = this._handler

    const diskPath = handler.getFilePath('/' + diskId)
    const diskPath = handler._getFilePath('/' + diskId)
    const mountDir = yield getTmpDir()
    await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
    try {
@@ -401,27 +412,20 @@ export class RemoteAdapter {
    return `${baseName}.vhd`
  }

  async listAllVms() {
  async listAllVmBackups() {
    const handler = this._handler
    const vmsUuids = []
    await asyncEach(await handler.list(BACKUP_DIR), async entry => {

    const backups = { __proto__: null }
    await asyncMap(await handler.list(BACKUP_DIR), async entry => {
      // ignore hidden and lock files
      if (entry[0] !== '.' && !entry.endsWith('.lock')) {
        vmsUuids.push(entry)
        const vmBackups = await this.listVmBackups(entry)
        if (vmBackups.length !== 0) {
          backups[entry] = vmBackups
        }
      }
    })
    return vmsUuids
  }

  async listAllVmBackups() {
    const vmsUuids = await this.listAllVms()
    const backups = { __proto__: null }
    await asyncEach(vmsUuids, async vmUuid => {
      const vmBackups = await this.listVmBackups(vmUuid)
      if (vmBackups.length !== 0) {
        backups[vmUuid] = vmBackups
      }
    })
    return backups
  }

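Either way, `listAllVmBackups()` resolves to a map keyed by VM UUID, with empty entries dropped. A sketch of consuming it (the `adapter` variable is a `RemoteAdapter` instance created elsewhere):

```js
async function printBackupCounts(adapter) {
  const backups = await adapter.listAllVmBackups()
  for (const [vmUuid, vmBackups] of Object.entries(backups)) {
    console.log(`${vmUuid}: ${vmBackups.length} backup(s)`)
  }
}
```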
@@ -662,7 +666,7 @@ export class RemoteAdapter {
    return path
  }

  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
    const handler = this._handler
    if (this.useVhdDirectory()) {
      const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
@@ -673,26 +677,41 @@
        await input.task
        return validator.apply(this, arguments)
      },
        nbdClient,
      })
      await VhdAbstract.createAlias(handler, path, dataPath)
      return size
    } else {
      return this.outputStream(path, input, { checksum, validator })
      const inputWithSize = await createVhdStreamWithLength(input)
      return this.outputStream(path, inputWithSize, { checksum, validator, expectedSize: inputWithSize.length })
    }
  }

  async outputStream(path, input, { checksum = true, validator = noop } = {}) {
  async outputStream(path, input, { checksum = true, validator = noop, expectedSize } = {}) {
    const container = watchStreamSize(input)
    const handler = this._handler
    await handler.outputStream(path, input, {

    await this._handler.outputStream(path, input, {
      checksum,
      dirMode: this._dirMode,
      async validator(tmpPath) {
      async validator() {
        await input.task
        // size on file system can be bigger when encrypted ( IV + alignment padding)
        const size = await handler.getSize(tmpPath, { exact: false })
        if (Math.abs(size - container.size) > handler.getSizeApproximationMargin()) {
          return false
        if (expectedSize !== undefined) {
          // check that we read all the stream
          strictEqual(
            container.size,
            expectedSize,
            `transferred size ${container.size}, expected file size : ${expectedSize}`
          )
        }
        let size
        try {
          size = await this._handler.getSize(path)
        } catch (err) {
          // can fail if the remote is encrypted
        }
        if (size !== undefined) {
          // check that everything is written to disk
          strictEqual(size, container.size, `written size ${size}, transferred size: ${container.size}`)
        }
        return validator.apply(this, arguments)
      },
@@ -701,8 +720,8 @@
  }

  // open the hierarchy of ancestors until we find a full one
  async _createVhdStream(handler, path, { useChain }) {
    const disposableSynthetic = useChain ? await VhdSynthetic.fromVhdChain(handler, path) : await openVhd(handler, path)
  async _createSyntheticStream(handler, path) {
    const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
    // I don't want the vhds to be disposed on return
    // but only when the stream is done ( or failed )

@@ -727,15 +746,15 @@
    return stream
  }

  async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true } = {}) {
  async readDeltaVmBackup(metadata, ignoredVdis) {
    const handler = this._handler
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const { vbds, vhds, vifs, vm } = metadata
    const dir = dirname(metadata._filename)
    const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

    const streams = {}
    await asyncMapSettled(Object.keys(vdis), async ref => {
      streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain })
      streams[`${ref}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[ref]))
    })

    return {
@@ -744,7 +763,7 @@
      vdis,
      version: '1.0.0',
      vifs,
      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
      vm,
    }
  }

@@ -756,49 +775,7 @@
    // _filename is a private field used to compute the backup id
    //
    // it's enumerable to make it cacheable
    const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }

    // backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
    if (typeof metadata.vm.is_a_template === 'number') {
      const properties = {
        vbds: ['bootable', 'unpluggable', 'storage_lock', 'empty', 'currently_attached'],
        vdis: [
          'sharable',
          'read_only',
          'storage_lock',
          'managed',
          'missing',
          'is_a_snapshot',
          'allow_caching',
          'metadata_latest',
        ],
        vifs: ['currently_attached', 'MAC_autogenerated'],
        vm: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_a_snapshot', 'is_snapshot_from_vmpp'],
        vmSnapshot: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_snapshot_from_vmpp'],
      }

      function fixBooleans(obj, properties) {
        properties.forEach(property => {
          if (typeof obj[property] === 'number') {
            obj[property] = obj[property] === 1
          }
        })
      }

      for (const [key, propertiesInKey] of Object.entries(properties)) {
        const value = metadata[key]
        if (value !== undefined) {
          // some properties of the metadata are collections indexed by the opaqueRef
          const isCollection = Object.keys(value).some(subKey => subKey.startsWith('OpaqueRef:'))
          if (isCollection) {
            Object.values(value).forEach(subValue => fixBooleans(subValue, propertiesInKey))
          } else {
            fixBooleans(value, propertiesInKey)
          }
        }
      }
    }
    return metadata
    return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
  }
}

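The compatibility shim removed here normalized integer-encoded booleans in metadata produced by XenServer < 7.1. Its core transform is easy to see in isolation:

```js
function fixBooleans(obj, properties) {
  properties.forEach(property => {
    if (typeof obj[property] === 'number') {
      // 1/0 were produced by old JSON-in-XML-RPC transports
      obj[property] = obj[property] === 1
    }
  })
}

const vm = { is_a_template: 0, is_control_domain: 1, name_label: 'vm1' }
fixBooleans(vm, ['is_a_template', 'is_control_domain'])
console.log(vm) // → { is_a_template: false, is_control_domain: true, name_label: 'vm1' }
```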
@@ -832,7 +809,11 @@ decorateMethodsWith(RemoteAdapter, {
    debounceResourceFactory,
  ]),

  _usePartitionFiles: Disposable.factory,

  getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),

  getPartition: Disposable.factory,
})

exports.RemoteAdapter = RemoteAdapter
26
@xen-orchestra/backups/RestoreMetadataBackup.js
Normal file
@@ -0,0 +1,26 @@
'use strict'

const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')

exports.RestoreMetadataBackup = class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      return String(await handler.readFile(`${backupId}/data.json`))
    }
  }
}

@@ -1,32 +0,0 @@
import { join, resolve } from 'node:path/posix'

import { DIR_XO_POOL_METADATA_BACKUPS } from './RemoteAdapter.mjs'
import { PATH_DB_DUMP } from './_runners/_PoolMetadataBackup.mjs'

export class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
      const dataFileName = resolve(backupId, metadata.data ?? 'data.json')
      const data = await handler.readFile(dataFileName)

      // if data is JSON, send it as a plain string, otherwise, consider the data as binary and encode it
      const isJson = dataFileName.endsWith('.json')
      return isJson ? data.toString() : { encoding: 'base64', data: data.toString('base64') }
    }
  }
}

@@ -1,5 +1,7 @@
import CancelToken from 'promise-toolbox/CancelToken'
import Zone from 'node-zone'
'use strict'

const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')

const logAfterEnd = log => {
  const error = new Error('task has already ended')
@@ -28,7 +30,7 @@ const serializeError = error =>

const $$task = Symbol('@xen-orchestra/backups/Task')

export class Task {
class Task {
  static get cancelToken() {
    const task = Zone.current.data[$$task]
    return task !== undefined ? task.#cancelToken : CancelToken.none
@@ -149,6 +151,7 @@ export class Task {
    })
  }
}
exports.Task = Task

for (const method of ['info', 'warning']) {
  Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
@@ -1,13 +1,16 @@
import { asyncMap } from '@xen-orchestra/async-map'
'use strict'

import { DIR_XO_POOL_METADATA_BACKUPS } from '../RemoteAdapter.mjs'
import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'
const { asyncMap } = require('@xen-orchestra/async-map')

export const PATH_DB_DUMP = '/pool/xmldbdump'
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { Task } = require('./Task.js')

export class PoolMetadataBackup {
const PATH_DB_DUMP = '/pool/xmldbdump'
exports.PATH_DB_DUMP = PATH_DB_DUMP

exports.PoolMetadataBackup = class PoolMetadataBackup {
  constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
501
@xen-orchestra/backups/_VmBackup.js
Normal file
@@ -0,0 +1,501 @@
'use strict'

const assert = require('assert')
const findLast = require('lodash/findLast.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
const { exportDeltaVm } = require('./_deltaVm.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
const { getOldEntries } = require('./_getOldEntries.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

const { debug, warn } = createLogger('xo:backups:VmBackup')

class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

const asyncEach = async (iterable, fn, thisArg = iterable) => {
  for (const item of iterable) {
    await fn.call(thisArg, item)
  }
}

const forkDeltaExport = deltaExport =>
  Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })

class VmBackup {
  constructor({
    config,
    getSnapshotNameLabel,
    healthCheckSr,
    job,
    remoteAdapters,
    remotes,
    schedule,
    settings,
    srs,
    throttleStream,
    vm,
  }) {
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
      // don't match replicated VMs created by this very job otherwise they
      // will be replicated again and again
      throw new Error('cannot backup a VM created by this very job')
    }

    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm

    // VM (snapshot) that is really exported
    this.exportedVm = undefined

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isDelta = job.mode === 'delta'
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._throttleStream = throttleStream
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._isDelta
        ? [DeltaBackupWriter, DeltaReplicationWriter]
        : [FullBackupWriter, FullReplicationWriter]

      const allSettings = job.settings
      Object.keys(remoteAdapters).forEach(remoteId => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
        }
      })
    }
  }

  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
    const n = writers.size
    if (n === 0) {
      return
    }

    async function callWriter(writer) {
      const { name } = writer.constructor
      try {
        debug('writer step starting', { step, writer: name })
        await fn(writer)
        debug('writer step succeeded', { duration: step, writer: name })
      } catch (error) {
        writers.delete(writer)

        warn('writer step failed', { error, step, writer: name })

        // these two steps are the only ones that are not already in their own sub tasks
        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }

        throw error
      }
    }
    if (n === 1) {
      const [writer] = writers
      return callWriter(writer)
    }

    const errors = []
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await callWriter(writer)
      } catch (error) {
        errors.push(error)
      }
    })
    if (writers.size === 0) {
      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const { vm } = this
    const xapi = this._xapi

    const settings = this._settings

    const doSnapshot =
      settings.unconditionalSnapshot ||
      this._isDelta ||
      (!settings.offlineBackup && vm.power_state === 'Running') ||
      settings.snapshotRetention !== 0
    if (doSnapshot) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          ignoreNobakVdis: true,
          name_label: this._getSnapshotNameLabel(vm),
          unplugVusbs: true,
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _copyDelta() {
    const { exportedVm } = this
    const baseVm = this._baseVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
    deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          sizeContainers,
          timestamp,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }

  async _copyFull() {
    const { compression } = this.job
    const stream = this._throttleStream(
      await this._xapi.VM_export(this.exportedVm.$ref, {
        compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
        useSnapshot: false,
      })
    )
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    const allSettings = this.job.settings
    const baseSettings = this._baseSettings
    const baseVmRef = this._baseVm?.$ref

    const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
    const xapi = this._xapi
    await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
      const settings = {
        ...baseSettings,
        ...allSettings[scheduleId],
        ...allSettings[this.vm.uuid],
      }
      return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
        if ($ref !== baseVmRef) {
          return xapi.VM_destroy($ref)
        }
      })
    })
  }

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      debug('no base VM found')
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      debug('not using base VM because fullInterval reached')
return
|
||||
}
|
||||
|
||||
const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')
|
||||
|
||||
// resolve full record
|
||||
baseVm = await xapi.getRecord('VM', baseVm.$ref)
|
||||
|
||||
const baseUuidToSrcVdi = new Map()
|
||||
await asyncMap(await baseVm.$getDisks(), async baseRef => {
|
||||
const [baseUuid, snapshotOf] = await Promise.all([
|
||||
xapi.getField('VDI', baseRef, 'uuid'),
|
||||
xapi.getField('VDI', baseRef, 'snapshot_of'),
|
||||
])
|
||||
const srcVdi = srcVdis[snapshotOf]
|
||||
if (srcVdi !== undefined) {
|
||||
baseUuidToSrcVdi.set(baseUuid, srcVdi)
|
||||
} else {
|
||||
debug('ignore snapshot VDI because no longer present on VM', {
|
||||
vdi: baseUuid,
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
const presentBaseVdis = new Map(baseUuidToSrcVdi)
|
||||
await this._callWriters(
|
||||
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
|
||||
'writer.checkBaseVdis()',
|
||||
false
|
||||
)
|
||||
|
||||
if (presentBaseVdis.size === 0) {
|
||||
debug('no base VM found')
|
||||
return
|
||||
}
|
||||
|
||||
const fullVdisRequired = new Set()
|
||||
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
|
||||
if (presentBaseVdis.has(baseUuid)) {
|
||||
debug('found base VDI', {
|
||||
base: baseUuid,
|
||||
vdi: srcVdi.uuid,
|
||||
})
|
||||
} else {
|
||||
debug('missing base VDI', {
|
||||
base: baseUuid,
|
||||
vdi: srcVdi.uuid,
|
||||
})
|
||||
fullVdisRequired.add(srcVdi.uuid)
|
||||
}
|
||||
})
|
||||
|
||||
this._baseVm = baseVm
|
||||
this._fullVdisRequired = fullVdisRequired
|
||||
}
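The `fullInterval` guard above forces a full export once the delta chain grows too long: with `fullInterval === 0` the chain may grow indefinitely, otherwise the base VM is only reused while `fullInterval > deltaChainLength`. Illustrative values:

```js
const useBaseVm = (fullInterval, deltaChainLength) => fullInterval === 0 || fullInterval > deltaChainLength

useBaseVm(0, 42) // true  – never force a full backup
useBaseVm(7, 3)  // true  – chain still short enough, keep doing deltas
useBaseVm(7, 7)  // false – interval reached, next backup is a full one
```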
  async _healthCheck() {
    const settings = this._settings

    if (this._healthCheckSr === undefined) {
      return
    }

    // check if current VM has tags
    const { tags } = this.vm
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      return
    }

    await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
  }

  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    if (this._isDelta) {
      await this._selectBaseVm()
    }

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { vm } = this
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await (this._isDelta ? this._copyDelta() : this._copyFull())
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
    await this._healthCheck()
  }
}

exports.VmBackup = VmBackup

decorateMethodsWith(VmBackup, {
  run: defer,
})
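`decorateMethodsWith(VmBackup, { run: defer })` rewraps `run` with golike-defer, which is why the method receives `$defer` as its first parameter: callbacks registered through `$defer` run when the method settles, like Go's `defer`. A minimal sketch of the pattern (the resource helpers are hypothetical):

```js
import { defer } from 'golike-defer'

const copyFile = defer(async function ($defer, src, dst) {
  const handle = await openFile(src) // hypothetical async resource
  $defer(() => handle.close()) // always runs, even if the copy throws
  await handle.copyTo(dst)
})
```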
@@ -1,11 +1,12 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { join } from '@xen-orchestra/fs/path'
'use strict'

import { DIR_XO_CONFIG_BACKUPS } from '../RemoteAdapter.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'
const { asyncMap } = require('@xen-orchestra/async-map')

export class XoMetadataBackup {
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { Task } = require('./Task.js')

exports.XoMetadataBackup = class XoMetadataBackup {
  constructor({ config, job, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
@@ -22,17 +23,10 @@ export class XoMetadataBackup {
    const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`

    const data = job.xoMetadata
    let dataBaseName = './data'

    // JSON data is sent as a plain string, binary data is sent as an object with `data` and `encoding` properties
    const isJson = typeof data === 'string'
    if (isJson) {
      dataBaseName += '.json'
    }
    const fileName = `${dir}/data.json`

    const metadata = JSON.stringify(
      {
        data: dataBaseName,
        jobId: job.id,
        jobName: job.name,
        scheduleId: schedule.id,
@@ -42,8 +36,6 @@ export class XoMetadataBackup {
      null,
      2
    )

    const dataFileName = join(dir, dataBaseName)
    const metaDataFileName = `${dir}/metadata.json`

    await asyncMap(
@@ -60,7 +52,7 @@ export class XoMetadataBackup {
      async () => {
        const handler = adapter.handler
        const dirMode = this._config.dirMode
        await handler.outputFile(dataFileName, isJson ? data : Buffer.from(data.data, data.encoding), { dirMode })
        await handler.outputFile(fileName, data, { dirMode })
        await handler.outputFile(metaDataFileName, metadata, {
          dirMode,
        })
@xen-orchestra/backups/_backupType.js (new file)
@@ -0,0 +1,6 @@
'use strict'

exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@@ -1,4 +0,0 @@
export const isMetadataFile = filename => filename.endsWith('.json')
export const isVhdFile = filename => filename.endsWith('.vhd')
export const isXvaFile = filename => filename.endsWith('.xva')
export const isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@@ -1,25 +1,25 @@
import { createLogger } from '@xen-orchestra/log'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'
'use strict'

import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { compose } from '@vates/compose'
import { createCachedLookup } from '@vates/cached-dns.lookup'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
import { createRunner } from './Backup.mjs'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { getHandler } from '@xen-orchestra/fs'
import { parseDuration } from '@vates/parse-duration'
import { Xapi } from '@xen-orchestra/xapi'
const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')

import { RemoteAdapter } from './RemoteAdapter.mjs'
import { Task } from './Task.mjs'
require('@xen-orchestra/log/configure').catchGlobalErrors(logger)

createCachedLookup().patchGlobal()
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()

const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
const { Xapi } = require('@xen-orchestra/xapi')

const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')

const logger = createLogger('xo:backups:worker')
catchGlobalErrors(logger)
const { debug } = logger

class BackupWorker {
@@ -48,7 +48,7 @@ class BackupWorker {
  }

  run() {
    return createRunner({
    return new Backup({
      config: this.#config,
      getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
      getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
@@ -1,11 +1,13 @@
import cancelable from 'promise-toolbox/cancelable'
import CancelToken from 'promise-toolbox/CancelToken'
'use strict'

const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')

// Similar to `Promise.all` + `map` but passes a cancel token to the callback
//
// If any of the executions fails, the cancel token is triggered and the
// promise rejects with the first reason.
export const cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
  const { cancel, token } = CancelToken.source([$cancelToken])
  try {
    return await Promise.all(
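A usage sketch, assuming the callback receives the token first (mirroring the wrapped function's own signature); `fetchWithCancel` is a hypothetical helper that honours the token:

```js
import { cancelableMap } from './_cancelableMap.js'

// if any download fails, the shared token cancels the in-flight ones
const bodies = await cancelableMap(urls, (token, url) => fetchWithCancel(url, token))
```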
@@ -1,18 +1,19 @@
import * as UUID from 'uuid'
import sum from 'lodash/sum.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { Constants, openVhd, VhdAbstract, VhdFile } from 'vhd-lib'
import { isVhdAlias, resolveVhdAlias } from 'vhd-lib/aliases.js'
import { dirname, resolve } from 'node:path'
import { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } from './_backupType.mjs'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { mergeVhdChain } from 'vhd-lib/merge.js'

import { Task } from './Task.mjs'
import { Disposable } from 'promise-toolbox'
import handlerPath from '@xen-orchestra/fs/path'
'use strict'

const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')

const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')

// checking the size of a VHD directory is costly:
// one HTTP query per 1000 blocks
@@ -116,7 +117,7 @@ const listVhds = async (handler, vmDir, logWarn) => {
  return { vhds, interruptedVhds, aliases }
}

export async function checkAliases(
async function checkAliases(
  aliasPaths,
  targetDataRepository,
  { handler, logInfo = noop, logWarn = console.warn, remove = false }
@@ -175,9 +176,11 @@ export async function checkAliases(
  })
}

exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

export async function cleanVm(
exports.cleanVm = async function cleanVm(
  vmDir,
  {
    fixMetadata,
@@ -1,19 +1,19 @@
import test from 'test'
import { strict as assert } from 'node:assert'
'use strict'

import tmp from 'tmp'
import fs from 'fs-extra'
import * as uuid from 'uuid'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { VHDFOOTER, VHDHEADER } from './tests.fixtures.mjs'
import { VhdFile, Constants, VhdDirectory, VhdAbstract } from 'vhd-lib'
import { checkAliases } from './_cleanVm.mjs'
import { dirname, basename } from 'node:path'
import { rimraf } from 'rimraf'
const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict

const { beforeEach, afterEach, describe } = test
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')

let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
@@ -1,10 +1,12 @@
import { pipeline } from 'node:stream'
import { ThrottleGroup } from '@kldzj/stream-throttle'
import identity from 'lodash/identity.js'
'use strict'

const { pipeline } = require('node:stream')
const { ThrottleGroup } = require('@kldzj/stream-throttle')
const identity = require('lodash/identity.js')

const noop = Function.prototype

export default function createStreamThrottle(rate) {
module.exports = function createStreamThrottle(rate) {
  if (rate === 0) {
    return identity
  }
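The tail of this function is cut off by the hunk, but the visible part says a `rate` of 0 disables throttling by returning `identity`. Presumably the remaining lines pipe each stream through a shared `ThrottleGroup` so concurrent exports share one global limit; a sketch under that assumption:

```js
import { pipeline } from 'node:stream'
import { ThrottleGroup } from '@kldzj/stream-throttle'

function createStreamThrottleSketch(rate) {
  if (rate === 0) {
    return stream => stream // no limit
  }
  const group = new ThrottleGroup({ rate }) // bytes per second, shared by all streams
  return stream => pipeline(stream, group.createThrottle(), () => {})
}
```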
@@ -1,22 +1,24 @@
import find from 'lodash/find.js'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import omit from 'lodash/omit.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { CancelToken } from 'promise-toolbox'
import { compareVersions } from 'compare-versions'
import { createVhdStreamWithLength } from 'vhd-lib'
import { defer } from 'golike-defer'
'use strict'

import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

export const TAG_BASE_DELTA = 'xo:base_delta'
const { cancelableMap } = require('./_cancelableMap.js')
const { Task } = require('./Task.js')
const pick = require('lodash/pick.js')

export const TAG_COPY_SRC = 'xo:copy_of'
const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA

const TAG_BACKUP_SR = 'xo:backup:sr'
const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC

const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
const resolveUuid = async (xapi, cache, uuid, type) => {
@@ -31,7 +33,7 @@ const resolveUuid = async (xapi, cache, uuid, type) => {
  return ref
}

export async function exportIncrementalVm(
exports.exportDeltaVm = async function exportDeltaVm(
  vm,
  baseVm,
  {
@@ -141,28 +143,25 @@ export async function exportIncrementalVm(
  )
}

export const importIncrementalVm = defer(async function importIncrementalVm(
exports.importDeltaVm = defer(async function importDeltaVm(
  $defer,
  incrementalVm,
  deltaVm,
  sr,
  { cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
) {
  const { version } = incrementalVm
  const { version } = deltaVm
  if (compareVersions(version, '1.0.0') < 0) {
    throw new Error(`Unsupported delta backup version: ${version}`)
  }

  const vmRecord = incrementalVm.vm
  const vmRecord = deltaVm.vm
  const xapi = sr.$xapi

  let baseVm
  if (detectBase) {
    const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
    if (remoteBaseVmUuid) {
      baseVm = find(
        xapi.objects.all,
        obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
      )
      baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)

      if (!baseVm) {
        throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
@@ -184,15 +183,15 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
      baseVdis[vbd.VDI] = vbd.$VDI
    }
  })
  const vdiRecords = incrementalVm.vdis
  const vdiRecords = deltaVm.vdis

  // 0. Create suspend_VDI
  let suspendVdi
  if (vmRecord.suspend_VDI !== undefined && vmRecord.suspend_VDI !== 'OpaqueRef:NULL') {
  if (vmRecord.power_state === 'Suspended') {
    const vdi = vdiRecords[vmRecord.suspend_VDI]
    if (vdi === undefined) {
      Task.warning('Suspend VDI not available for this suspended VM', {
        vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
        vm: pick(vmRecord, 'uuid', 'name_label'),
      })
    } else {
      suspendVdi = await xapi.getRecord(
@@ -241,7 +240,7 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
  await asyncMap(await xapi.getField('VM', vmRef, 'VBDs'), ref => ignoreErrors.call(xapi.call('VBD.destroy', ref)))

  // 3. Create VDIs & VBDs.
  const vbdRecords = incrementalVm.vbds
  const vbdRecords = deltaVm.vbds
  const vbds = groupBy(vbdRecords, 'VDI')
  const newVdis = {}
  await asyncMap(Object.keys(vdiRecords), async vdiRef => {
@@ -310,7 +309,7 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
    }
  })

  const { streams } = incrementalVm
  const { streams } = deltaVm

  await Promise.all([
    // Import VDI contents.
@@ -327,7 +326,7 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
    }),

    // Create VIFs.
    asyncMap(Object.values(incrementalVm.vifs), vif => {
    asyncMap(Object.values(deltaVm.vifs), vif => {
      let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)

      if (network === undefined) {
@@ -359,8 +358,8 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
  ])

  await Promise.all([
    incrementalVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
    xapi.setField('VM', vmRef, 'name_label', incrementalVm.vm.name_label),
    deltaVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
    xapi.setField('VM', vmRef, 'name_label', deltaVm.vm.name_label),
  ])

  return vmRef
@xen-orchestra/backups/_filenameDate.js (new file)
@@ -0,0 +1,8 @@
'use strict'

const { utcFormat, utcParse } = require('d3-time-format')

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
exports.formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
exports.parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
@@ -1,6 +0,0 @@
import { utcFormat, utcParse } from 'd3-time-format'

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
export const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
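The `%Y%m%dT%H%M%SZ` specifier avoids `:` and `-`, which are unsafe in Windows filenames, and the two directions round-trip:

```js
formatFilenameDate(new Date(Date.UTC(2024, 0, 31, 6, 5, 4))) // '20240131T060504Z'
parseFilenameDate('20240131T060504Z') // Date for 2024-01-31T06:05:04 UTC
```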
@@ -1,13 +1,14 @@
import { createLogger } from '@xen-orchestra/log'
import { finished, PassThrough } from 'node:stream'
'use strict'

const { debug } = createLogger('xo:backups:forkStreamUnpipe')
const { finished, PassThrough } = require('node:stream')

const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
export function forkStreamUnpipe(source) {
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
  const { forks = 0 } = source
  source.forks = forks + 1
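The visible `forks` counter tracks how many consumers exist; the rest of the hunk is truncated. A minimal sketch of the behaviour the comments describe (not the actual implementation):

```js
import { PassThrough } from 'node:stream'

function forkSketch(source) {
  const fork = new PassThrough()
  source.pipe(fork)
  // an errored fork detaches itself instead of destroying the source,
  // so the other forks keep receiving data
  fork.on('error', () => source.unpipe(fork))
  return fork
}
```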
@@ -1,4 +1,6 @@
'use strict'

// returns all entries except the last `retention` ones
export function getOldEntries(retention, entries) {
exports.getOldEntries = function getOldEntries(retention, entries) {
  return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
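Concretely, with `entries` ordered oldest first:

```js
getOldEntries(2, ['a', 'b', 'c', 'd']) // → ['a', 'b']: the newest 2 are kept
getOldEntries(0, ['a', 'b']) // → ['a', 'b']: retention 0 keeps nothing
getOldEntries(2, undefined) // → []
```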
@@ -1,11 +1,13 @@
import Disposable from 'promise-toolbox/Disposable'
import { join } from 'node:path'
import { mkdir, rmdir } from 'node:fs/promises'
import { tmpdir } from 'os'
'use strict'

const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')

const MAX_ATTEMPTS = 3

export async function getTmpDir() {
exports.getTmpDir = async function getTmpDir() {
  for (let i = 0; true; ++i) {
    const path = join(tmpdir(), Math.random().toString(36).slice(2))
    try {
@xen-orchestra/backups/_getVmBackupDir.js (new file)
@@ -0,0 +1,8 @@
'use strict'

const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

exports.getVmBackupDir = function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
@@ -1,5 +0,0 @@
export const BACKUP_DIR = 'xo-vm-backups'

export function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
@@ -1,4 +1,6 @@
import assert from 'node:assert'
'use strict'

const assert = require('assert')

const COMPRESSED_MAGIC_NUMBERS = [
  // https://tools.ietf.org/html/rfc1952.html#page-5
@@ -45,7 +47,7 @@ const isValidTar = async (handler, size, fd) => {
}

// TODO: find a heuristic for compressed files
export async function isValidXva(path) {
async function isValidXva(path) {
  const handler = this._handler

  // the size is larger when encrypted, and reading part of an encrypted file is not implemented
@@ -72,5 +74,6 @@ export async function isValidXva(path) {
    return true
  }
}
exports.isValidXva = isValidXva

const noop = Function.prototype
@@ -1,7 +1,9 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { createLogger } from '@xen-orchestra/log'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

const { debug } = createLogger('xo:backups:listPartitions')

@@ -22,7 +24,8 @@ const IGNORED_PARTITION_TYPES = {
  0x82: true, // swap
}

export const LVM_PARTITION_TYPE = 0x8e
const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE

const parsePartxLine = createParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
@@ -30,7 +33,7 @@ const parsePartxLine = createParser({
})
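`parse-pairs` turns a `KEY="value"` line into an object; with the `keyTransform` shown above, `UUID` becomes `id` and other keys are lowercased. A hypothetical `partx` output line:

```js
parsePartxLine('NR="1" START="2048" SIZE="1073741824" UUID="1234-ab" TYPE="0x8e"')
// → { nr: '1', start: '2048', size: '1073741824', id: '1234-ab', type: '0x8e' }
// (a valueTransform option, elided from this hunk, may additionally coerce values)
```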
// returns an empty array in case of a non-partitioned disk
export async function listPartitions(devicePath) {
exports.listPartitions = async function listPartitions(devicePath) {
  const parts = await fromCallback(execFile, 'partx', [
    '--bytes',
    '--output=NR,START,SIZE,NAME,UUID,TYPE',
@@ -1,6 +1,8 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

// ===================================================================

@@ -27,5 +29,5 @@ const makeFunction =
      .map(Array.isArray(fields) ? parse : line => parse(line)[fields])
  }

export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')
exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')
@@ -1,132 +0,0 @@
import { asyncMap } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { PoolMetadataBackup } from './_PoolMetadataBackup.mjs'
import { XoMetadataBackup } from './_XoMetadataBackup.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

export const Metadata = class MetadataBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(remoteIds.map(id => this._getAdapter(id))),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }
}
@@ -1,96 +0,0 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { FullRemote } from './_vmRunners/FullRemote.mjs'
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'

const DEFAULT_REMOTE_VM_SETTINGS = {
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  timeout: 0,
  validateVhdStreams: false,
  vmTimeout: 0,
}

export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      () => this._getAdapter(job.sourceRemote),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id => id !== job.sourceRemote && this._getAdapter(id))
      ),
      async ({ adapter: sourceRemoteAdapter }, healthCheckSr, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => !!_)
        if (remoteAdapters.length === 0) {
          return
        }

        const vmsUuids = await sourceRemoteAdapter.listAllVms()

        Task.info('vms', { vms: vmsUuids })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)
        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          const opts = {
            baseSettings,
            config,
            job,
            healthCheckSr,
            remoteAdapters,
            schedule,
            settings: { ...settings, ...allSettings[vmUuid] },
            sourceRemoteAdapter,
            throttleStream,
            vmUuid,
          }
          let vmBackup
          if (job.mode === 'delta') {
            vmBackup = new IncrementalRemote(opts)
          } else if (job.mode === 'full') {
            vmBackup = new FullRemote(opts)
          } else {
            throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
          }

          return runTask(taskStart, () => vmBackup.run())
        }
        const { concurrency } = settings
        await asyncMapSettled(vmsUuids, !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@@ -1,137 +0,0 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
import { FullXapi } from './_vmRunners/FullXapi.mjs'

const DEFAULT_XAPI_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  diskPerVmConcurrency: 0, // not limited by default
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  validateVhdStreams: false,
  vmTimeout: 0,
}

export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          return this._getRecord('VM', vmUuid).then(
            disposableVm =>
              Disposable.use(disposableVm, vm => {
                taskStart.data.name_label = vm.name_label
                return runTask(taskStart, () => {
                  const opts = {
                    baseSettings,
                    config,
                    getSnapshotNameLabel,
                    healthCheckSr,
                    job,
                    remoteAdapters,
                    schedule,
                    settings: { ...settings, ...allSettings[vm.uuid] },
                    srs,
                    throttleStream,
                    vm,
                  }
                  let vmBackup
                  if (job.mode === 'delta') {
                    vmBackup = new IncrementalXapi(opts)
                  } else {
                    if (job.mode === 'full') {
                      vmBackup = new FullXapi(opts)
                    } else {
                      throw new Error(`Job mode ${job.mode} not implemented`)
                    }
                  }
                  return vmBackup.run()
                })
              }),
            error =>
              runTask(taskStart, () => {
                throw error
              })
          )
        }
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@@ -1,49 +0,0 @@
import Disposable from 'promise-toolbox/Disposable'
import pTimeout from 'promise-toolbox/timeout'
import { compileTemplate } from '@xen-orchestra/template'
import { runTask } from './_runTask.mjs'
import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'

export const DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

export const Abstract = class AbstractRunner {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const baseSettings = this._computeBaseSettings(config, job)
    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }

    const { getRemoteTimeout } = this._settings
    this._getAdapter = async function (remoteId) {
      try {
        const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))

        return new Disposable(() => disposable.dispose(), {
          adapter: disposable.value,
          remoteId,
        })
      } catch (error) {
        // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
        runTask(
          {
            name: 'get remote adapter',
            data: { type: 'remote', id: remoteId },
          },
          () => Promise.reject(error)
        )
      }
    }
  }
}
@@ -1,6 +0,0 @@
export class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}
@@ -1,7 +0,0 @@
export function getAdaptersByRemote(adapters) {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}
@@ -1,5 +0,0 @@
import { Task } from '../Task.mjs'

const noop = Function.prototype

export const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
@@ -1,50 +0,0 @@
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { AbstractRemote } from './_AbstractRemote.mjs'
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
import { Task } from '../../Task.mjs'

export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote {
  _getRemoteWriter() {
    return FullRemoteWriter
  }
  async _run($defer) {
    const transferList = await this._computeTransferList(({ mode }) => mode === 'full')

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')
    if (transferList.length > 0) {
      for (const metadata of transferList) {
        const stream = await this._sourceRemoteAdapter.readFullVmBackup(metadata)
        const sizeContainer = watchStreamSize(stream)

        // @todo shouldn't transfer backup if it will be deleted by retention policy (higher retention on source than destination)
        await this._callWriters(
          writer =>
            writer.run({
              stream: forkStreamUnpipe(stream),
              timestamp: metadata.timestamp,
              vm: metadata.vm,
              vmSnapshot: metadata.vmSnapshot,
              sizeContainer,
            }),
          'writer.run()'
        )
        // for healthcheck
        this._tags = metadata.vm.tags
      }
    } else {
      Task.info('No new data to upload for this VM')
    }
  }
}

decorateMethodsWith(FullRemote, {
  _run: defer,
})
@@ -1,63 +0,0 @@
import { createLogger } from '@xen-orchestra/log'

import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
import { FullXapiWriter } from '../_writers/FullXapiWriter.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
import { AbstractXapi } from './_AbstractXapi.mjs'

const { debug } = createLogger('xo:backups:FullXapiVmBackup')

export const FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
  _getWriters() {
    return [FullRemoteWriter, FullXapiWriter]
  }

  _mustDoSnapshot() {
    const vm = this._vm

    const settings = this._settings
    return (
      settings.unconditionalSnapshot ||
      (!settings.offlineBackup && vm.power_state === 'Running') ||
      settings.snapshotRetention !== 0
    )
  }
  _selectBaseVm() {}

  async _copy() {
    const { compression } = this.job
    const vm = this._vm
    const exportedVm = this._exportedVm
    const stream = this._throttleStream(
      await this._xapi.VM_export(exportedVm.$ref, {
        compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
        useSnapshot: false,
      })
    )
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
          vm,
          vmSnapshot: exportedVm,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }
}
@@ -1,66 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'

import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'

class IncrementalRemoteVmBackupRunner extends AbstractRemote {
  _getRemoteWriter() {
    return IncrementalRemoteWriter
  }
  async _run($defer) {
    const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    if (transferList.length > 0) {
      for (const metadata of transferList) {
        assert.strictEqual(metadata.mode, 'delta')

        await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
        const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
          useChain: false,
        })

        const differentialVhds = {}

        await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
          differentialVhds[key] = await isVhdDifferencingDisk(stream)
        })

        incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
        await this._callWriters(
          writer =>
            writer.transfer({
              deltaExport: forkDeltaExport(incrementalExport),
              differentialVhds,
              timestamp: metadata.timestamp,
              vm: metadata.vm,
              vmSnapshot: metadata.vmSnapshot,
            }),
          'writer.transfer()'
        )
        await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
        // for healthcheck
        this._tags = metadata.vm.tags
      }
    } else {
      Task.info('No new data to upload for this VM')
    }
  }
}

export const IncrementalRemote = IncrementalRemoteVmBackupRunner
decorateMethodsWith(IncrementalRemoteVmBackupRunner, {
  _run: defer,
})
@@ -1,173 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { pipeline } from 'node:stream'
import findLast from 'lodash/findLast.js'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import keyBy from 'lodash/keyBy.js'
import mapValues from 'lodash/mapValues.js'
import vhdStreamValidator from 'vhd-lib/vhdStreamValidator.js'

import { AbstractXapi } from './_AbstractXapi.mjs'
import { exportIncrementalVm } from '../../_incrementalVm.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { IncrementalXapiWriter } from '../_writers/IncrementalXapiWriter.mjs'
import { Task } from '../../Task.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'

const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')

const noop = Function.prototype

export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
  _getWriters() {
    return [IncrementalRemoteWriter, IncrementalXapiWriter]
  }

  _mustDoSnapshot() {
    return true
  }

  async _copy() {
    const baseVm = this._baseVm
    const vm = this._vm
    const exportedVm = this._exportedVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    // since NBD is network-based, if one disk uses NBD, all the disks use it,
    // except the suspended VDI
    if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
      Task.info('Transfer data using NBD')
    }

    const differentialVhds = {}
    // since isVhdDifferencingDisk reads and unshifts data from the stream,
    // it must be done BEFORE any other stream transform
    await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
      differentialVhds[key] = await isVhdDifferencingDisk(stream)
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    if (this._settings.validateVhdStreams) {
      deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
    }
    deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          differentialVhds,
          sizeContainers,
          timestamp,
          vm,
          vmSnapshot: exportedVm,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      debug('no base VM found')
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      debug('not using base VM because fullInterval reached')
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this._vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const [baseUuid, snapshotOf] = await Promise.all([
        xapi.getField('VDI', baseRef, 'uuid'),
        xapi.getField('VDI', baseRef, 'snapshot_of'),
      ])
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(baseUuid, srcVdi)
      } else {
        debug('ignore snapshot VDI because no longer present on VM', {
          vdi: baseUuid,
        })
      }
    })

    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    await this._callWriters(
      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
      'writer.checkBaseVdis()',
      false
    )

    if (presentBaseVdis.size === 0) {
      debug('no base VM found')
      return
    }

    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (presentBaseVdis.has(baseUuid)) {
        debug('found base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
      } else {
        debug('missing base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }
}
@@ -1,93 +0,0 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { Task } from '../../Task.mjs'

const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')

class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

const asyncEach = async (iterable, fn, thisArg = iterable) => {
  for (const item of iterable) {
    await fn.call(thisArg, item)
  }
}

export const Abstract = class AbstractVmBackupRunner {
  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
    const n = writers.size
    if (n === 0) {
      return
    }

    async function callWriter(writer) {
      const { name } = writer.constructor
      try {
        debug('writer step starting', { step, writer: name })
        await fn(writer)
        debug('writer step succeeded', { step, writer: name })
      } catch (error) {
        writers.delete(writer)

        warn('writer step failed', { error, step, writer: name })

        // these two steps are the only ones that are not already in their own sub tasks
        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }

        throw error
      }
    }
    if (n === 1) {
      const [writer] = writers
      return callWriter(writer)
    }

    const errors = []
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await callWriter(writer)
      } catch (error) {
        errors.push(error)
      }
    })
    if (writers.size === 0) {
      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
    }
  }

  async _healthCheck() {
    const settings = this._settings

    if (this._healthCheckSr === undefined) {
      return
    }

    // check if current VM has tags
    const tags = this._tags
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      // create a task to have an info in the logs and reports
      return Task.run(
        {
          name: 'health check',
        },
        () => {
          Task.info(`This VM doesn't match the health check's tags for this schedule`)
        }
      )
    }

    await this._callWriters(writer => writer.healthCheck(), 'writer.healthCheck()')
  }
}
@@ -1,97 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { Disposable } from 'promise-toolbox'

import { getVmBackupDir } from '../../_getVmBackupDir.mjs'

import { Abstract } from './_Abstract.mjs'

export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
  constructor({
    config,
    job,
    healthCheckSr,
    remoteAdapters,
    schedule,
    settings,
    sourceRemoteAdapter,
    throttleStream,
    vmUuid,
  }) {
    super()
    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    this._healthCheckSr = healthCheckSr
    this._sourceRemoteAdapter = sourceRemoteAdapter
    this._throttleStream = throttleStream
    this._vmUuid = vmUuid

    const allSettings = job.settings
    const writers = new Set()
    this._writers = writers

    const RemoteWriter = this._getRemoteWriter()
    Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
      const targetSettings = {
        ...settings,
        ...allSettings[remoteId],
      }
      writers.add(
        new RemoteWriter({
          adapter,
          config,
          healthCheckSr,
          job,
          scheduleId: schedule.id,
          vmUuid,
          remoteId,
          settings: targetSettings,
        })
      )
    })
  }

  async _computeTransferList(predicate) {
    const vmBackups = await this._sourceRemoteAdapter.listVmBackups(this._vmUuid, predicate)
    const localMetadata = new Map()
    Object.values(vmBackups).forEach(metadata => {
      const timestamp = metadata.timestamp
      localMetadata.set(timestamp, metadata)
    })
    const nbRemotes = Object.keys(this.remoteAdapters).length
    const remoteMetadatas = {}
    await asyncEach(Object.values(this.remoteAdapters), async remoteAdapter => {
      const remoteMetadata = await remoteAdapter.listVmBackups(this._vmUuid, predicate)
      remoteMetadata.forEach(metadata => {
        const timestamp = metadata.timestamp
        remoteMetadatas[timestamp] = (remoteMetadatas[timestamp] ?? 0) + 1
      })
    })

    let chain = []
    const timestamps = [...localMetadata.keys()]
    timestamps.sort()
    for (const timestamp of timestamps) {
      if (remoteMetadatas[timestamp] !== nbRemotes) {
        // this backup is not present on all the remotes;
        // it should be retransferred if not found later
        chain.push(localMetadata.get(timestamp))
      } else {
        // the backup is present locally and on every remote: the chain has already been transferred
        chain = []
      }
    }
    return chain
  }
|
||||
|
||||
async run() {
|
||||
const handler = this._sourceRemoteAdapter._handler
|
||||
await Disposable.use(await handler.lock(getVmBackupDir(this._vmUuid)), async () => {
|
||||
await this._run()
|
||||
await this._healthCheck()
|
||||
})
|
||||
}
|
||||
}
|
||||
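The chain computation above reduces to plain data: local backups are walked in chronological order and the pending chain resets every time a backup is found on every remote, since an incremental chain only needs to be resent from the last fully mirrored point onwards. A standalone sketch:

```js
// Simplified model of the chain computation: `localTimestamps` are the
// local backups, `counts` says on how many of the `nbRemotes` remotes
// each one already exists
function computeTransferList(localTimestamps, counts, nbRemotes) {
  let chain = []
  for (const timestamp of [...localTimestamps].sort((a, b) => a - b)) {
    if (counts[timestamp] !== nbRemotes) {
      chain.push(timestamp) // missing somewhere: candidate for retransfer
    } else {
      chain = [] // fully mirrored: everything before it can be skipped
    }
  }
  return chain
}

console.log(computeTransferList([1, 2, 3, 4], { 1: 2, 2: 1, 3: 2, 4: 0 }, 2))
// → [ 4 ]: backup 3 is everywhere, so only what follows it must be sent
```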
@@ -1,275 +0,0 @@
import assert from 'node:assert'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncMap } from '@xen-orchestra/async-map'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { formatDateTime } from '@xen-orchestra/xapi'

import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { Abstract } from './_Abstract.mjs'

export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
  constructor({
    config,
    getSnapshotNameLabel,
    healthCheckSr,
    job,
    remoteAdapters,
    remotes,
    schedule,
    settings,
    srs,
    throttleStream,
    vm,
  }) {
    super()
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
      // don't match replicated VMs created by this very job, otherwise they
      // would be replicated again and again
      throw new Error('cannot backup a VM created by this very job')
    }

    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    const tags = (this._tags = vm.tags)

    // VM (snapshot) that is really exported
    this._exportedVm = undefined
    this._vm = vm

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isIncremental = job.mode === 'delta'
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._throttleStream = throttleStream
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings
    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._getWriters()

      const allSettings = job.settings
      Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(
            new BackupWriter({
              adapter,
              config,
              healthCheckSr,
              job,
              scheduleId: schedule.id,
              vmUuid: vm.uuid,
              remoteId,
              settings: targetSettings,
            })
          )
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(
            new ReplicationWriter({
              config,
              healthCheckSr,
              job,
              scheduleId: schedule.id,
              vmUuid: vm.uuid,
              sr,
              settings: targetSettings,
            })
          )
        }
      })
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const vm = this._vm
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const vm = this._vm
    const xapi = this._xapi

    const settings = this._settings

    if (this._mustDoSnapshot()) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          ignoreNobakVdis: true,
          name_label: this._getSnapshotNameLabel(vm),
          unplugVusbs: true,
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this._exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this._exportedVm.uuid
      })
    } else {
      this._exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this._vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    const allSettings = this.job.settings
    const baseSettings = this._baseSettings
    const baseVmRef = this._baseVm?.$ref

    const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
    const xapi = this._xapi
    await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
      const settings = {
        ...baseSettings,
        ...allSettings[scheduleId],
        ...allSettings[this._vm.uuid],
      }
      return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
        if ($ref !== baseVmRef) {
          return xapi.VM_destroy($ref)
        }
      })
    })
  }

  async _copy() {
    throw new Error('Not implemented')
  }

  _getWriters() {
    throw new Error('Not implemented')
  }

  _mustDoSnapshot() {
    throw new Error('Not implemented')
  }

  async _selectBaseVm() {
    throw new Error('Not implemented')
  }

  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    await this._selectBaseVm()

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const vm = this._vm
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await this._copy()
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
    await this._healthCheck()
  }
}

decorateMethodsWith(AbstractXapi, {
  run: defer,
})
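One subtlety in `run()` is the `startAfter` expression, which folds the power state and two settings into a tri-state value: `false` (never shut the VM down), `'snapshot'` (restart right after the snapshot is taken) or `'backup'` (restart only once the whole backup is done). A small truth-table sketch:

```js
// Evaluates the same expression as run() for a few illustrative cases
for (const [isRunning, offlineBackup, offlineSnapshot] of [
  [false, true, true], // halted VM: nothing to shut down or restart
  [true, true, false], // offline backup: restart after the whole backup
  [true, false, true], // offline snapshot: restart right after the snapshot
  [true, false, false], // online backup: the VM is never stopped
]) {
  const startAfter = isRunning && (offlineBackup ? 'backup' : offlineSnapshot && 'snapshot')
  console.log({ isRunning, offlineBackup, offlineSnapshot, startAfter })
}
// → false, 'backup', 'snapshot', false
```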
@@ -1,11 +0,0 @@
import mapValues from 'lodash/mapValues.js'

import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'

export function forkDeltaExport(deltaExport) {
  return Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })
}
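`forkDeltaExport` relies on a small prototype trick: the returned object inherits every property of the original export while shadowing only `streams` with forked copies, so the original and the fork can be consumed independently. The same pattern in isolation (values are illustrative):

```js
const original = { metadata: 'kept', streams: { xvda: 'stream A' } }

// the view inherits everything through its prototype…
const view = Object.create(original, {
  // …except `streams`, shadowed by an own property holding the forks
  streams: { value: { xvda: 'fork of stream A' } },
})

console.log(view.metadata) // 'kept' (inherited from original)
console.log(view.streams.xvda) // 'fork of stream A' (own property)
console.log(original.streams.xvda) // 'stream A' (untouched)
```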
@@ -1,12 +0,0 @@
import { AbstractWriter } from './_AbstractWriter.mjs'

export class AbstractFullWriter extends AbstractWriter {
  async run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
    try {
      return await this._run({ timestamp, sizeContainer, stream, vm, vmSnapshot })
    } finally {
      // ensure stream is properly closed
      stream.destroy()
    }
  }
}
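Note the `return await` above: it is not redundant. With a plain `return`, the promise would leave the `try` block unsettled and `finally` would destroy the stream while `_run` may still be reading from it. A minimal sketch of the difference:

```js
async function withAwait(work) {
  try {
    return await work() // settles inside try: finally runs after completion
  } finally {
    console.log('cleanup (after the work is done)')
  }
}

async function withoutAwait(work) {
  try {
    return work() // returns a pending promise: finally runs immediately
  } finally {
    console.log('cleanup (the work may still be running)')
  }
}

const work = () => new Promise(resolve => setTimeout(() => resolve('done'), 10))
await withAwait(work) // logs cleanup after ~10 ms
await withoutAwait(work) // logs cleanup right away
```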
@@ -1,29 +0,0 @@
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'

export class AbstractWriter {
  constructor({ config, healthCheckSr, job, vmUuid, scheduleId, settings }) {
    this._config = config
    this._healthCheckSr = healthCheckSr
    this._job = job
    this._scheduleId = scheduleId
    this._settings = settings
    this._vmUuid = vmUuid
  }

  beforeBackup() {}

  afterBackup() {}

  healthCheck(sr) {}

  async _isAlreadyTransferred(timestamp) {
    const vmUuid = this._vmUuid
    const adapter = this._adapter
    const backupDir = getVmBackupDir(vmUuid)
    try {
      // readFile() is asynchronous: its result must be awaited before parsing
      const actualMetadata = JSON.parse(
        await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
      )
      return actualMetadata
    } catch (error) {
      // the metadata file is missing or unreadable: consider the backup not transferred
    }
  }
}
@@ -1,72 +0,0 @@
import { extractOpaqueRef } from '@xen-orchestra/xapi'
import assert from 'node:assert/strict'

import { HealthCheckVmBackup } from '../../HealthCheckVmBackup.mjs'
import { Task } from '../../Task.mjs'

export const MixinXapiWriter = (BaseClass = Object) =>
  class MixinXapiWriter extends BaseClass {
    constructor({ sr, ...rest }) {
      super(rest)

      this._sr = sr
    }

    // check whether the base VM has all its disks on the health check SR
    async #isAlreadyOnHealthCheckSr(baseVm) {
      const xapi = baseVm.$xapi
      const vdiRefs = await xapi.VM_getDisks(baseVm.$ref)
      for (const vdiRef of vdiRefs) {
        const vdi = xapi.getObject(vdiRef)
        if (vdi.$SR.uuid !== this._healthCheckSr.uuid) {
          return false
        }
      }
      return true
    }

    healthCheck() {
      const sr = this._healthCheckSr
      assert.notStrictEqual(sr, undefined, 'SR should be defined before making a health check')
      assert.notEqual(this._targetVmRef, undefined, 'a VM should have been transferred to be health checked')
      // clone or copy the transferred VM, then boot-check the result
      return Task.run(
        {
          name: 'health check',
        },
        async () => {
          const { $xapi: xapi } = sr
          let healthCheckVmRef
          try {
            const baseVm = xapi.getObject(this._targetVmRef) ?? (await xapi.waitObject(this._targetVmRef))

            if (await this.#isAlreadyOnHealthCheckSr(baseVm)) {
              healthCheckVmRef = await Task.run(
                { name: 'cloning-vm' },
                async () =>
                  await xapi
                    .callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
                    .then(extractOpaqueRef)
              )
            } else {
              healthCheckVmRef = await Task.run(
                { name: 'copying-vm' },
                async () =>
                  await xapi
                    .callAsync('VM.copy', this._targetVmRef, `Health Check - ${baseVm.name_label}`, sr.$ref)
                    .then(extractOpaqueRef)
              )
            }
            const healthCheckVm = xapi.getObject(healthCheckVmRef) ?? (await xapi.waitObject(healthCheckVmRef))

            await new HealthCheckVmBackup({
              restoredVm: healthCheckVm,
              xapi,
            }).run()
          } finally {
            healthCheckVmRef && (await xapi.VM_destroy(healthCheckVmRef))
          }
        }
      )
    }
  }
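The branch above is a cost optimization: `VM.clone` is a fast copy-on-write operation but keeps the disks on their current SR, while `VM.copy` performs a full duplication onto the target SR. A condensed sketch of the decision (the XAPI method names are the ones used above, everything else is illustrative):

```js
// Condensed view of the decision; `onHealthCheckSr` stands in for
// the result of #isAlreadyOnHealthCheckSr(baseVm)
function healthCheckPlan(onHealthCheckSr) {
  return onHealthCheckSr
    ? { task: 'cloning-vm', call: 'VM.clone' } // cheap CoW clone, same SR
    : { task: 'copying-vm', call: 'VM.copy' } // full copy onto the health check SR
}

console.log(healthCheckPlan(true)) // → { task: 'cloning-vm', call: 'VM.clone' }
console.log(healthCheckPlan(false)) // → { task: 'copying-vm', call: 'VM.copy' }
```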
@@ -1,6 +0,0 @@
import { openVhd } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'

export async function checkVhd(handler, path) {
  await Disposable.use(openVhd(handler, path), () => {})
}
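`checkVhd` is a validation in disguise: `openVhd` throws if the VHD is missing or corrupted, and the empty body means only the acquisition matters. A generic sketch of the disposable pattern it relies on (this is not promise-toolbox's actual API, only the shape of the idea):

```js
// Generic "use" combinator: acquire, run the body, always release
async function use(acquire, body) {
  const { value, dispose } = await acquire()
  try {
    return await body(value)
  } finally {
    await dispose()
  }
}

// with a no-op body, as in checkVhd, the call boils down to
// "throw if the resource cannot be opened, then release it"
await use(
  async () => ({ value: 'vhd handle', dispose: async () => console.log('released') }),
  () => {}
)
```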
@@ -1,4 +1,6 @@
-export function watchStreamSize(stream, container = { size: 0 }) {
+'use strict'
+
+exports.watchStreamSize = function watchStreamSize(stream, container = { size: 0 }) {
   stream.on('data', data => {
     container.size += data.length
   })
@@ -94,13 +94,13 @@ In case any incoherence is detected, the file is deleted so it will be fully gen
 job.start(data: { mode: Mode, reportWhen: ReportWhen })
 ├─ task.info(message: 'vms', data: { vms: string[] })
 ├─ task.warning(message: string)
-├─ task.start(data: { type: 'VM', id: string, name_label?: string })
+├─ task.start(data: { type: 'VM', id: string })
 │ ├─ task.warning(message: string)
 │ ├─ task.start(message: 'clean-vm')
 │ │ └─ task.end
 │ ├─ task.start(message: 'snapshot')
 │ │ └─ task.end
-│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, name_label?: string, isFull: boolean })
+│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
 │ │ ├─ task.warning(message: string)
 │ │ ├─ task.start(message: 'transfer')
 │ │ │ ├─ task.warning(message: string)
@@ -171,16 +171,13 @@ job:
 # For replication jobs, indicates which SRs to use
 srs: IdPattern

-type: 'backup' | 'mirrorBackup'
+# Here for historical reasons
+type: 'backup'

-# Indicates which VMs to backup/replicate for a xapi to remote backup job
+# Indicates which VMs to backup/replicate
 vms: IdPattern

-# Indicates which remote to read from for a mirror backup job
-sourceRemote: IdPattern
-
 # Indicates which XAPI to use to connect to a specific VM or SR
-# for remote to remote backup job,this is only needed if there is healtcheck
 recordToXapi:
   [ObjectId]: XapiId

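To make the schema concrete, here is a sketch of a job record it describes. All identifiers are invented, and the `{ id: { __or: [...] } }` simple-pattern shape is an assumption based on how `IdPattern`s are used elsewhere in XO:

```js
// Illustrative only: a backup job matching the schema above
const job = {
  type: 'backup', // kept for historical reasons
  mode: 'delta',
  // which VMs to back up/replicate (hypothetical pattern shape)
  vms: { id: { __or: ['vm-uuid-1', 'vm-uuid-2'] } },
  // for replication jobs, which SRs to use
  srs: { id: 'sr-uuid-1' },
  // which XAPI to use to reach a given VM or SR
  recordToXapi: {
    'vm-uuid-1': 'xapi-1',
    'vm-uuid-2': 'xapi-1',
    'sr-uuid-1': 'xapi-2',
  },
}
```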
@@ -231,7 +228,7 @@ Settings are described in [`@xen-orchestra/backups/Backup.js](https://github.com
   - `prepare({ isFull })`
   - `transfer({ timestamp, deltaExport, sizeContainers })`
   - `cleanup()`
-  - `healthCheck()` // is not executed if no health check sr or tag doesn't match
+  - `healthCheck(sr)`
 - **Full**
   - `run({ timestamp, sizeContainer, stream })`
   - `afterBackup()`

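A skeleton of a custom delta writer honoring this interface could look as follows; the hook names and arguments come from the list above, while the class name and bodies are invented:

```js
// Sketch only: a delta writer skeleton following the documented lifecycle
class ExampleDeltaWriter {
  async beforeBackup() {
    // open connections, acquire locks
  }

  async prepare({ isFull }) {
    // set up the target for a full or an incremental export
  }

  async transfer({ timestamp, deltaExport, sizeContainers }) {
    // consume deltaExport.streams and persist them
  }

  async cleanup() {
    // merge or clean whatever prepare() set up
  }

  async healthCheck(sr) {
    // restore the transferred backup on `sr` and verify it boots
  }

  async afterBackup() {
    // release locks, close connections
  }
}
```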
@@ -1,35 +0,0 @@
#!/bin/sh

# This script must be executed when the machine starts.
#
# It must run as root to be able to use xenstore-read and xenstore-write

# fail in case of error or undefined variable
set -eu

# stop here if a health check is not in progress
if [ "$(xenstore-read vm-data/xo-backup-health-check 2>&1)" != planned ]
then
  exit
fi

# not strictly necessary, but informs XO that this script has started, which helps diagnose issues
xenstore-write vm-data/xo-backup-health-check running

# put your test here
#
# in this example, the command `sqlite3` is used to validate the health of a database
# and its output is captured and passed to XO via the XenStore in case of error
if output=$(sqlite3 ~/my-database.sqlite3 .table 2>&1)
then
  # inform XO everything is ok
  xenstore-write vm-data/xo-backup-health-check success
else
  # inform XO there is an issue
  xenstore-write vm-data/xo-backup-health-check failure

  # more info about the issue can be written to `vm-data/xo-backup-health-check-error`
  #
  # it will be shown in XO
  xenstore-write vm-data/xo-backup-health-check-error "$output"
fi
@@ -1,4 +1,6 @@
-export function extractIdsFromSimplePattern(pattern) {
+'use strict'
+
+exports.extractIdsFromSimplePattern = function extractIdsFromSimplePattern(pattern) {
   if (pattern === undefined) {
     return []
   }
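The rest of the function is cut off by the diff viewer, but its guard clause is visible above. Assuming the simple-pattern shapes used elsewhere in XO, either `{ id: 'x' }` or `{ id: { __or: [...] } }` (an assumption, since the implementation is not shown), its contract can be illustrated as:

```js
// Hypothetical usage, based on an assumed pattern shape
extractIdsFromSimplePattern(undefined) // → [] (the guard shown above)
extractIdsFromSimplePattern({ id: 'vm-uuid-1' }) // → ['vm-uuid-1']
extractIdsFromSimplePattern({ id: { __or: ['vm-1', 'vm-2'] } }) // → ['vm-1', 'vm-2']
```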
Some files were not shown because too many files have changed in this diff.