Compare commits


1 Commit

Author: Florent Beauchamp
SHA1: 543f44d1d1
Message: fix(@xen-orchestra/backups): better cleaning of broken vhd
must be merged after #6471 and all tests passing
Date: 2022-11-14 10:54:31 +01:00
681 changed files with 8493 additions and 24417 deletions

View File

@@ -1 +0,0 @@
{ "extends": ["@commitlint/config-conventional"] }

View File

@@ -28,7 +28,7 @@ module.exports = {
},
},
{
-files: ['*.{integ,spec,test}.{,c,m}js'],
+files: ['*.{spec,test}.{,c,m}js'],
rules: {
'n/no-unpublished-require': 'off',
'n/no-unpublished-import': 'off',

View File

@@ -1,32 +0,0 @@
name: Continuous Integration
on: push
jobs:
CI:
runs-on: ubuntu-latest
steps:
# https://github.com/actions/checkout
- uses: actions/checkout@v3
- name: Install packages
run: |
sudo apt-get update
sudo apt-get install -y curl qemu-utils python3-vmdkstream git libxml2-utils libfuse2 nbdkit
- name: Cache Turbo
# https://github.com/actions/cache
uses: actions/cache@v3
with:
path: '**/node_modules/.cache/turbo'
key: ${{ runner.os }}-turbo-cache
- name: Setup Node environment
# https://github.com/actions/setup-node
uses: actions/setup-node@v3
with:
node-version: '18'
cache: 'yarn'
- name: Install project dependencies
run: yarn
- name: Build the project
run: yarn build
- name: Lint tests
run: yarn test-lint
- name: Integration tests
run: sudo yarn test-integration

.github/workflows/push.yml vendored Normal file
View File

@@ -0,0 +1,12 @@
name: CI
on: push
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Build docker image
run: docker-compose -f docker/docker-compose.dev.yml build
- name: Create the container and start the tests
run: docker-compose -f docker/docker-compose.dev.yml up --exit-code-from xo

.gitignore vendored
View File

@@ -34,4 +34,3 @@ yarn-error.log.*
# code coverage
.nyc_output/
coverage/
-.turbo/

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
# Only check commit message if commit on master or first commit on another
# branch, to avoid bothering with fix commits after reviews
#
# FIXME: does not properly run with git commit --amend
if [ "$(git rev-parse --abbrev-ref HEAD)" = master ] || [ "$(git rev-list --count master..)" -eq 0 ]
then
npx --no -- commitlint --edit "$1"
fi

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx lint-staged

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/async-each):
-```sh
-npm install --save @vates/async-each
+```
+> npm install --save @vates/async-each
```
## Usage

View File

@@ -33,7 +33,7 @@
"test": "node--test"
},
"devDependencies": {
-"sinon": "^15.0.1",
+"sinon": "^14.0.1",
"tap": "^16.3.0",
"test": "^3.2.1"
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/cached-dns.lookup):
-```sh
-npm install --save @vates/cached-dns.lookup
+```
+> npm install --save @vates/cached-dns.lookup
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-calls):
-```sh
-npm install --save @vates/coalesce-calls
+```
+> npm install --save @vates/coalesce-calls
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/compose):
-```sh
-npm install --save @vates/compose
+```
+> npm install --save @vates/compose
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/decorate-with):
-```sh
-npm install --save @vates/decorate-with
+```
+> npm install --save @vates/decorate-with
```
## Usage

View File

@@ -1,32 +0,0 @@
```js
import diff from '@vates/diff'
diff('foo bar baz', 'Foo qux')
// → [ 0, 'F', 4, 'qux', 7, '' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `F`
// - at position 4, it contains `qux`
// - at position 7, it ends
diff('Foo qux', 'foo bar baz')
// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `f`
// - at position 4, it contains `bar`
// - at position 7, it contains `baz`
// works with all collections that support
// - `.length`
// - `collection[index]`
// - `.slice(start, end)`
//
// which includes:
// - arrays
// - strings
// - `Buffer`
// - `TypedArray`
diff([0, 1, 2], [3, 4])
// → [ 0, [ 3, 4 ], 2, [] ]
```

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,65 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/diff
[![Package Version](https://badgen.net/npm/v/@vates/diff)](https://npmjs.org/package/@vates/diff) ![License](https://badgen.net/npm/license/@vates/diff) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/diff)](https://bundlephobia.com/result?p=@vates/diff) [![Node compatibility](https://badgen.net/npm/node/@vates/diff)](https://npmjs.org/package/@vates/diff)
> Computes differences between two arrays, buffers or strings
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/diff):
```sh
npm install --save @vates/diff
```
## Usage
```js
import diff from '@vates/diff'
diff('foo bar baz', 'Foo qux')
// → [ 0, 'F', 4, 'qux', 7, '' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `F`
// - at position 4, it contains `qux`
// - at position 7, it ends
diff('Foo qux', 'foo bar baz')
// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `f`
// - at position 4, it contains `bar`
// - at position 7, it contains `baz`
// works with all collections that support
// - `.length`
// - `collection[index]`
// - `.slice(start, end)`
//
// which includes:
// - arrays
// - strings
// - `Buffer`
// - `TypedArray`
diff([0, 1, 2], [3, 4])
// → [ 0, [ 3, 4 ], 2, [] ]
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,37 +0,0 @@
'use strict'
/**
* Compare two data arrays, buffers or strings and invoke the provided callback function for each difference.
*
* @template {Array|Buffer|string} T
* @param {Array|Buffer|string} data1 - The first data array or buffer to compare.
* @param {T} data2 - The second data array or buffer to compare.
* @param {(index: number, diff: T) => void} [cb] - The callback function to invoke for each difference. If not provided, an array of differences will be returned.
* @returns {Array<number|T>|undefined} - An array of differences if no callback is provided, otherwise undefined.
*/
module.exports = function diff(data1, data2, cb) {
let result
if (cb === undefined) {
result = []
cb = result.push.bind(result)
}
const n1 = data1.length
const n2 = data2.length
const n = Math.min(n1, n2)
for (let i = 0; i < n; ++i) {
if (data1[i] !== data2[i]) {
let j = i + 1
while (j < n && data1[j] !== data2[j]) {
++j
}
cb(i, data2.slice(i, j))
i = j
}
}
if (n1 !== n2) {
cb(n, n1 < n2 ? data2.slice(n) : data2.slice(0, 0))
}
return result
}
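
For reference, a minimal sketch of the two calling modes described in the JSDoc above (assuming the module is required locally as `./index.js`; behaviour matches the `cb param` test below):

```js
const diff = require('./index.js')

// without a callback, differences come back as a flat [index, chunk, ...] array
diff('foo bar baz', 'baz bar foo')
// → [ 0, 'baz', 8, 'foo' ]

// with a callback, each difference is reported as (index, chunk)
// and the function returns undefined
diff('foo bar baz', 'baz bar foo', (index, chunk) => {
  console.log(index, chunk) // 0 'baz', then 8 'foo'
})
```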

View File

@@ -1,51 +0,0 @@
'use strict'
const assert = require('node:assert/strict')
const test = require('test')
const diff = require('./index.js')
test('data of equal length', function () {
const data1 = 'foo bar baz'
const data2 = 'baz bar foo'
assert.deepEqual(diff(data1, data2), [0, 'baz', 8, 'foo'])
})
test('data1 is longer', function () {
const data1 = 'foo bar'
const data2 = 'foo'
assert.deepEqual(diff(data1, data2), [3, ''])
})
test('data2 is longer', function () {
const data1 = 'foo'
const data2 = 'foo bar'
assert.deepEqual(diff(data1, data2), [3, ' bar'])
})
test('with arrays', function () {
const data1 = 'foo bar baz'.split('')
const data2 = 'baz bar foo'.split('')
assert.deepEqual(diff(data1, data2), [0, 'baz'.split(''), 8, 'foo'.split('')])
})
test('with buffers', function () {
const data1 = Buffer.from('foo bar baz')
const data2 = Buffer.from('baz bar foo')
assert.deepEqual(diff(data1, data2), [0, Buffer.from('baz'), 8, Buffer.from('foo')])
})
test('cb param', function () {
const data1 = 'foo bar baz'
const data2 = 'baz bar foo'
const calls = []
const cb = (...args) => calls.push(args)
diff(data1, data2, cb)
assert.deepEqual(calls, [
[0, 'baz'],
[8, 'foo'],
])
})

View File

@@ -1,36 +0,0 @@
{
"private": false,
"name": "@vates/diff",
"description": "Computes differences between two arrays, buffers or strings",
"keywords": [
"array",
"binary",
"buffer",
"diff",
"differences",
"string"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/diff",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/diff",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.3.0"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/disposable):
-```sh
-npm install --save @vates/disposable
+```
+> npm install --save @vates/disposable
```
## Usage

View File

@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
-"version": "0.1.4",
+"version": "0.1.2",
"engines": {
"node": ">=8.10"
},
@@ -25,11 +25,11 @@
"dependencies": {
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
-"@xen-orchestra/log": "^0.6.0",
+"@xen-orchestra/log": "^0.4.0",
"ensure-array": "^1.0.0"
},
"devDependencies": {
-"sinon": "^15.0.1",
+"sinon": "^14.0.1",
"test": "^3.2.1"
}
}

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/event-listeners-manager):
-```sh
-npm install --save @vates/event-listeners-manager
+```
+> npm install --save @vates/event-listeners-manager
```
## Usage

View File

@@ -21,7 +21,7 @@
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
-"vhd-lib": "^4.4.0"
+"vhd-lib": "^4.1.1"
},
"scripts": {
"postversion": "npm publish --access public"

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):
-```sh
-npm install --save @vates/multi-key-map
+```
+> npm install --save @vates/multi-key-map
```
## Usage

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/nbd-client):
-```sh
-npm install --save @vates/nbd-client
+```
+> npm install --save @vates/nbd-client
```
## Usage

View File

@@ -16,13 +16,9 @@ const {
NBD_REPLY_MAGIC,
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
-NBD_CMD_DISC,
} = require('./constants.js')
-const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
+const { fromCallback } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
-const { createLogger } = require('@xen-orchestra/log')
-const { warn } = createLogger('vates:nbd-client')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
@@ -35,34 +31,18 @@ module.exports = class NbdClient {
#exportName
#exportSize
-#waitBeforeReconnect
-#readAhead
-#readBlockRetries
-#reconnectRetry
-#connectTimeout
// AFAIK, there is no guarantee the server answers in the same order as the queries
// so we handle a backlog of commands waiting for a response and handle concurrency manually
#waitingForResponse // there is already a listener waiting for a response
#nextCommandQueryId = BigInt(0)
#commandQueryBacklog // map of commands waiting for a response queryId => { size /*in byte*/, resolve, reject}
-#connected = false
-#reconnectingPromise
-constructor(
-{ address, port = NBD_DEFAULT_PORT, exportname, cert },
-{ connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
-) {
+constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
this.#serverAddress = address
this.#serverPort = port
this.#exportName = exportname
this.#serverCert = cert
-this.#waitBeforeReconnect = waitBeforeReconnect
-this.#readAhead = readAhead
-this.#readBlockRetries = readBlockRetries
-this.#reconnectRetry = reconnectRetry
-this.#connectTimeout = connectTimeout
}
get exportSize() {
@@ -97,55 +77,19 @@ module.exports = class NbdClient {
})
}
-async #connect() {
-// first we connect to the server without tls, and then we upgrade the connection
+async connect() {
+// first we connect to the serve without tls, and then we upgrade the connection
// to tls during the handshake
await this.#unsecureConnect()
await this.#handshake()
-this.#connected = true
-// reset internal state if we reconnected a nbd client
this.#commandQueryBacklog = new Map()
this.#waitingForResponse = false
}
-async connect() {
-return pTimeout.call(this.#connect(), this.#connectTimeout)
-}
async disconnect() {
-if (!this.#connected) {
-return
-}
-const buffer = Buffer.alloc(28)
-buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
-buffer.writeInt16BE(0, 4) // no command flags for a disconnect
-buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
-await this.#write(buffer)
await this.#serverSocket.destroy()
-this.#serverSocket = undefined
-this.#connected = false
}
-#clearReconnectPromise = () => {
-this.#reconnectingPromise = undefined
-}
-async #reconnect() {
-await this.disconnect().catch(() => {})
-await pDelay(this.#waitBeforeReconnect) // need to let the xapi clean things on its side
-await this.connect()
-}
-async reconnect() {
-// we need to ensure reconnections do not occur in parallel
-if (this.#reconnectingPromise === undefined) {
-this.#reconnectingPromise = pRetry(() => this.#reconnect(), {
-tries: this.#reconnectRetry,
-})
-this.#reconnectingPromise.then(this.#clearReconnectPromise, this.#clearReconnectPromise)
-}
-return this.#reconnectingPromise
-}
// we can use individual read/write from the socket here since there is no concurrency
@@ -223,6 +167,7 @@ module.exports = class NbdClient {
this.#commandQueryBacklog.forEach(({ reject }) => {
reject(error)
})
+await this.disconnect()
}
async #readBlockResponse() {
@@ -230,6 +175,7 @@ module.exports = class NbdClient {
if (this.#waitingForResponse) {
return
}
try {
this.#waitingForResponse = true
const magic = await this.#readInt32()
@@ -254,8 +200,7 @@ module.exports = class NbdClient {
query.resolve(data)
this.#waitingForResponse = false
if (this.#commandQueryBacklog.size > 0) {
-// it doesn't throw directly but will throw all relevant promise on failure
-this.#readBlockResponse()
+await this.#readBlockResponse()
}
} catch (error) {
// reject all the promises
@@ -266,11 +211,6 @@ module.exports = class NbdClient {
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
-// we don't want to add anything in backlog while reconnecting
-if (this.#reconnectingPromise) {
-await this.#reconnectingPromise
-}
const queryId = this.#nextCommandQueryId
this.#nextCommandQueryId++
@@ -285,67 +225,19 @@ module.exports = class NbdClient {
buffer.writeInt32BE(size, 24)
return new Promise((resolve, reject) => {
-function decoratedReject(error) {
-error.index = index
-error.size = size
-reject(error)
-}
// this will handle one block response, but it can be another block
// since the server does not guarantee to handle queries in order
this.#commandQueryBacklog.set(queryId, {
size,
resolve,
-reject: decoratedReject,
+reject,
})
// really send the command to the server
-this.#write(buffer).catch(decoratedReject)
+this.#write(buffer).catch(reject)
-// #readBlockResponse never throws directly
-// but if it fails it will reject all the promises in the backlog
this.#readBlockResponse()
})
}
-async *readBlocks(indexGenerator) {
-// default : read all blocks
-if (indexGenerator === undefined) {
-const exportSize = this.#exportSize
-const chunkSize = 2 * 1024 * 1024
-indexGenerator = function* () {
-const nbBlocks = Math.ceil(exportSize / chunkSize)
-for (let index = 0; index < nbBlocks; index++) {
-yield { index, size: chunkSize }
-}
-}
-}
-const readAhead = []
-const readAheadMaxLength = this.#readAhead
-const makeReadBlockPromise = (index, size) => {
-const promise = pRetry(() => this.readBlock(index, size), {
-tries: this.#readBlockRetries,
-onRetry: async err => {
-warn('will retry reading block ', index, err)
-await this.reconnect()
-},
-})
-// error is handled during unshift
-promise.catch(() => {})
-return promise
-}
-// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
-for (const { index, size } of indexGenerator()) {
-// stack readAheadMaxLength promises before starting to handle the results
-if (readAhead.length === readAheadMaxLength) {
-// any error will stop reading blocks
-yield readAhead.shift()
-}
-readAhead.push(makeReadBlockPromise(index, size))
-}
-while (readAhead.length > 0) {
-yield readAhead.shift()
-}
-}
}
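
To make the removed read-ahead API concrete, here is a minimal sketch (the server address and export name are placeholders, and the option values are the constructor defaults from the removed code):

```js
const NbdClient = require('./index.js')

async function dumpExport() {
  const client = new NbdClient(
    { address: '192.0.2.1', exportname: 'my-export' },
    { readAhead: 10, readBlockRetries: 5, reconnectRetry: 5 }
  )
  await client.connect()
  try {
    // without an index generator, readBlocks() walks the whole export
    // in 2 MiB chunks, keeping up to `readAhead` reads in flight
    for await (const block of client.readBlocks()) {
      // block is a Buffer of at most 2 MiB
    }
  } finally {
    await client.disconnect()
  }
}
```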

View File

@@ -13,17 +13,16 @@
"url": "https://vates.fr"
},
"license": "ISC",
-"version": "1.2.0",
+"version": "1.0.0",
"engines": {
"node": ">=14.0"
},
"dependencies": {
-"@vates/async-each": "^1.0.0",
-"@vates/read-chunk": "^1.1.1",
+"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.6.0",
"promise-toolbox": "^0.21.0",
-"xen-api": "^1.3.1"
+"xen-api": "^1.2.2"
},
"devDependencies": {
"tap": "^16.3.0",
@@ -31,6 +30,6 @@
},
"scripts": {
"postversion": "npm publish --access public",
-"test-integration": "tap --lines 70 --functions 36 --branches 54 --statements 69 *.integ.js"
+"test-integration": "tap *.spec.js"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/otp):
-```sh
-npm install --save @vates/otp
+```
+> npm install --save @vates/otp
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/parse-duration):
-```sh
-npm install --save @vates/parse-duration
+```
+> npm install --save @vates/parse-duration
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/predicates):
-```sh
-npm install --save @vates/predicates
+```
+> npm install --save @vates/predicates
```
## Usage

View File

@@ -24,25 +24,3 @@ import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
-### `skip(stream, size)`
-Skips a given number of bytes from a stream.
-Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.
-```js
-import { skip } from '@vates/read-chunk'
-const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
-```
-### `skipStrict(stream, size)`
-Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.
-```js
-import { skipStrict } from '@vates/read-chunk'
-await skipStrict(stream, 2 * 1024 * 1024 * 1024)
-```

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
-```sh
-npm install --save @vates/read-chunk
+```
+> npm install --save @vates/read-chunk
```
## Usage
@@ -43,28 +43,6 @@ import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
-### `skip(stream, size)`
-Skips a given number of bytes from a stream.
-Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.
-```js
-import { skip } from '@vates/read-chunk'
-const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
-```
-### `skipStrict(stream, size)`
-Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.
-```js
-import { skipStrict } from '@vates/read-chunk'
-await skipStrict(stream, 2 * 1024 * 1024 * 1024)
-```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -1,36 +1,11 @@
'use strict'
-const assert = require('assert')
-/**
-* Read a chunk of data from a stream.
-*
-* The returned promise is rejected if there is an error while reading the stream.
-*
-* For streams in object mode, the returned promise resolves to a single object read from the stream.
-*
-* For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
-*
-* If `size` bytes are not available to be read, `null` will be returned *unless* the stream has ended, in which case all of the data remaining will be returned.
-*
-* @param {Readable} stream - A readable stream to read from.
-* @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
-* @returns {Promise<Buffer|string|unknown|null>} - A Promise that resolves to the read chunk if available, or null if end of stream is reached.
-*/
const readChunk = (stream, size) =>
-stream.errored != null
-? Promise.reject(stream.errored)
-: stream.closed || stream.readableEnded
+stream.closed || stream.readableEnded
? Promise.resolve(null)
-: size === 0
-? Promise.resolve(Buffer.alloc(0))
: new Promise((resolve, reject) => {
-if (size !== undefined) {
-assert(size > 0)
-// per Node documentation:
-// > The size argument must be less than or equal to 1 GiB.
-assert(size < 1073741824)
-}
function onEnd() {
resolve(null)
removeListeners()
@@ -58,21 +33,6 @@ const readChunk = (stream, size) =>
})
exports.readChunk = readChunk
-/**
-* Read a chunk of data from a stream.
-*
-* The returned promise is rejected if there is an error while reading the stream.
-*
-* For streams in object mode, the returned promise resolves to a single object read from the stream.
-*
-* For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
-*
-* If `size` bytes are not available to be read, the returned promise is rejected.
-*
-* @param {Readable} stream - A readable stream to read from.
-* @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
-* @returns {Promise<Buffer|string|unknown>} - A Promise that resolves to the read chunk.
-*/
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {
@@ -80,7 +40,7 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
}
if (size !== undefined && chunk.length !== size) {
-const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
+const error = new Error('stream has ended with not enough data')
Object.defineProperties(error, {
chunk: {
value: chunk,
@@ -91,69 +51,3 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
return chunk
}
-/**
-* Skips a given number of bytes from a readable stream.
-*
-* @param {Readable} stream - A readable stream to skip bytes from.
-* @param {number} size - The number of bytes to skip.
-* @returns {Promise<number>} A Promise that resolves to the number of bytes actually skipped. If the end of the stream is reached before all bytes are skipped, the Promise resolves to the number of bytes that were skipped before the end of the stream was reached. The Promise is rejected if there is an error while reading from the stream.
-*/
-async function skip(stream, size) {
-return stream.errored != null
-? Promise.reject(stream.errored)
-: size === 0 || stream.closed || stream.readableEnded
-? Promise.resolve(0)
-: new Promise((resolve, reject) => {
-let left = size
-function onEnd() {
-resolve(size - left)
-removeListeners()
-}
-function onError(error) {
-reject(error)
-removeListeners()
-}
-function onReadable() {
-const data = stream.read()
-left -= data === null ? 0 : data.length
-if (left > 0) {
-// continue to read
-} else {
-// if more than wanted has been read, push back the rest
-if (left < 0) {
-stream.unshift(data.slice(left))
-}
-resolve(size)
-removeListeners()
-}
-}
-function removeListeners() {
-stream.removeListener('end', onEnd)
-stream.removeListener('error', onError)
-stream.removeListener('readable', onReadable)
-}
-stream.on('end', onEnd)
-stream.on('error', onError)
-stream.on('readable', onReadable)
-onReadable()
-})
-}
-exports.skip = skip
-/**
-* Skips a given number of bytes from a stream.
-*
-* @param {Readable} stream - A readable stream to skip bytes from.
-* @param {number} size - The number of bytes to skip.
-* @returns {Promise<void>} - A Promise that resolves when the exact number of bytes have been skipped. The Promise is rejected if there is an error while reading from the stream or the stream ends before the exact number of bytes have been skipped.
-*/
-exports.skipStrict = async function skipStrict(stream, size) {
-const bytesSkipped = await skip(stream, size)
-if (bytesSkipped !== size) {
-const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
-error.bytesSkipped = bytesSkipped
-throw error
-}
-}
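
A minimal sketch combining the removed helpers above, e.g. to jump over a fixed-size header before reading a payload (the 512-byte header and 1 KiB payload sizes are made up):

```js
const { readChunkStrict, skipStrict } = require('./')
const { createReadStream } = require('node:fs')

async function readPayload(path) {
  const stream = createReadStream(path)
  await skipStrict(stream, 512) // throws early if the header is truncated
  return readChunkStrict(stream, 1024) // exactly the next 1 KiB, or throws
}
```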

View File

@@ -5,58 +5,12 @@ const assert = require('node:assert').strict
const { Readable } = require('stream')
-const { readChunk, readChunkStrict, skip, skipStrict } = require('./')
+const { readChunk, readChunkStrict } = require('./')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
-const rejectionOf = promise =>
-promise.then(
-value => {
-throw value
-},
-error => error
-)
-const makeErrorTests = fn => {
-it('rejects if the stream errors', async () => {
-const error = new Error()
-const stream = makeStream([])
-const pError = rejectionOf(fn(stream, 10))
-stream.destroy(error)
-assert.strict(await pError, error)
-})
-// only supported for Node >= 18
-if (process.versions.node.split('.')[0] >= 18) {
-it('rejects if the stream has already errored', async () => {
-const error = new Error()
-const stream = makeStream([])
-await new Promise(resolve => {
-stream.once('error', resolve).destroy(error)
-})
-assert.strict(await rejectionOf(fn(stream, 10)), error)
-})
-}
-}
describe('readChunk', () => {
-it('rejects if size is less than or equal to 0', async () => {
-const error = await rejectionOf(readChunk(makeStream([]), 0))
-assert.strictEqual(error.code, 'ERR_ASSERTION')
-})
-it('rejects if size is greater than or equal to 1 GiB', async () => {
-const error = await rejectionOf(readChunk(makeStream([]), 1024 * 1024 * 1024))
-assert.strictEqual(error.code, 'ERR_ASSERTION')
-})
-makeErrorTests(readChunk)
it('returns null if stream is empty', async () => {
assert.strictEqual(await readChunk(makeStream([])), null)
})
@@ -84,6 +38,10 @@ describe('readChunk', () => {
it('returns less data if stream ends', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
})
+it('returns an empty buffer if the specified size is 0', async () => {
+assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
+})
})
describe('with object stream', () => {
@@ -94,6 +52,14 @@ describe('readChunk', () => {
})
})
+const rejectionOf = promise =>
+promise.then(
+value => {
+throw value
+},
+error => error
+)
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
@@ -105,43 +71,7 @@ describe('readChunkStrict', function () {
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
assert(error instanceof Error)
-assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
+assert.strictEqual(error.message, 'stream has ended with not enough data')
assert.deepEqual(error.chunk, Buffer.from('foobar'))
})
})
-describe('skip', function () {
-makeErrorTests(skip)
-it('returns 0 if size is 0', async () => {
-assert.strictEqual(await skip(makeStream(['foo']), 0), 0)
-})
-it('returns 0 if the stream is already ended', async () => {
-const stream = await makeStream([])
-await readChunk(stream)
-assert.strictEqual(await skip(stream, 10), 0)
-})
-it('skips a number of bytes', async () => {
-const stream = makeStream('foo bar')
-assert.strictEqual(await skip(stream, 4), 4)
-assert.deepEqual(await readChunk(stream, 4), Buffer.from('bar'))
-})
-it('returns less size if stream ends', async () => {
-assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
-})
-})
-describe('skipStrict', function () {
-it('throws if stream ends with not enough data', async () => {
-const error = await rejectionOf(skipStrict(makeStream('foo bar'), 10))
-assert(error instanceof Error)
-assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
-assert.deepEqual(error.bytesSkipped, 7)
-})
-})

View File

@@ -19,7 +19,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
-"version": "1.1.1",
+"version": "1.0.1",
"engines": {
"node": ">=8.10"
},

View File

@@ -1,42 +0,0 @@
```js
import StreamReader from '@vates/stream-reader'
const reader = new StreamReader(stream)
```
### `.read([size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
```js
const chunk = await reader.read(512)
```
### `.readStrict([size])`
Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.
```js
const chunk = await reader.readStrict(512)
```
### `.skip(size)`
Skips a given number of bytes from a stream.
Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.
```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```
### `.skipStrict(size)`
Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.
```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,75 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/stream-reader
[![Package Version](https://badgen.net/npm/v/@vates/stream-reader)](https://npmjs.org/package/@vates/stream-reader) ![License](https://badgen.net/npm/license/@vates/stream-reader) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/stream-reader)](https://bundlephobia.com/result?p=@vates/stream-reader) [![Node compatibility](https://badgen.net/npm/node/@vates/stream-reader)](https://npmjs.org/package/@vates/stream-reader)
> Efficiently reads and skips chunks of a given size in a stream
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/stream-reader):
```sh
npm install --save @vates/stream-reader
```
## Usage
```js
import StreamReader from '@vates/stream-reader'
const reader = new StreamReader(stream)
```
### `.read([size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
```js
const chunk = await reader.read(512)
```
### `.readStrict([size])`
Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.
```js
const chunk = await reader.readStrict(512)
```
### `.skip(size)`
Skips a given number of bytes from a stream.
Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.
```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```
### `.skipStrict(size)`
Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.
```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,123 +0,0 @@
'use strict'
const assert = require('node:assert')
const { finished, Readable } = require('node:stream')
const noop = Function.prototype
// Inspired by https://github.com/nodejs/node/blob/85705a47958c9ae5dbaa1f57456db19bdefdc494/lib/internal/streams/readable.js#L1107
class StreamReader {
#ended = false
#error
#executor = resolve => {
this.#resolve = resolve
}
#stream
#resolve = noop
constructor(stream) {
stream = typeof stream.pipe === 'function' ? stream : Readable.from(stream)
this.#stream = stream
stream.on('readable', () => this.#resolve())
finished(stream, { writable: false }, error => {
this.#error = error
this.#ended = true
this.#resolve()
})
}
async read(size) {
if (size !== undefined) {
assert(size > 0)
}
do {
if (this.#ended) {
if (this.#error) {
throw this.#error
}
return null
}
const value = this.#stream.read(size)
if (value !== null) {
return value
}
await new Promise(this.#executor)
} while (true)
}
async readStrict(size) {
const chunk = await this.read(size)
if (chunk === null) {
throw new Error('stream has ended without data')
}
if (size !== undefined && chunk.length !== size) {
const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
Object.defineProperties(error, {
chunk: {
value: chunk,
},
})
throw error
}
return chunk
}
async skip(size) {
if (size === 0) {
return size
}
let toSkip = size
do {
if (this.#ended) {
if (this.#error) {
throw this.#error
}
return size - toSkip
}
const data = this.#stream.read()
if (data !== null) {
toSkip -= data === null ? 0 : data.length
if (toSkip > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (toSkip < 0) {
this.#stream.unshift(data.slice(toSkip))
}
return size
}
}
await new Promise(this.#executor)
} while (true)
}
async skipStrict(size) {
const bytesSkipped = await this.skip(size)
if (bytesSkipped !== size) {
const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
error.bytesSkipped = bytesSkipped
throw error
}
}
}
StreamReader.prototype[Symbol.asyncIterator] = async function* asyncIterator() {
let chunk
while ((chunk = await this.read()) !== null) {
yield chunk
}
}
module.exports = StreamReader
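
The `Symbol.asyncIterator` defined just above is not mentioned in the README; a minimal sketch of how it combines with `readStrict` (the file name and 512-byte header size are hypothetical):

```js
const { createReadStream } = require('node:fs')
const StreamReader = require('./index.js')

async function process(path) {
  const reader = new StreamReader(createReadStream(path))
  const header = await reader.readStrict(512) // throws if the file is shorter
  // the remaining data can then be consumed chunk by chunk
  for await (const chunk of reader) {
    // handle chunk (a Buffer)
  }
  return header
}
```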

View File

@@ -1,141 +0,0 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert').strict
const { Readable } = require('stream')
const StreamReader = require('./index.js')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
const rejectionOf = promise =>
promise.then(
value => {
throw value
},
error => error
)
const makeErrorTests = method => {
it('rejects if the stream errors', async () => {
const error = new Error()
const stream = makeStream([])
const pError = rejectionOf(new StreamReader(stream)[method](10))
stream.destroy(error)
assert.strict(await pError, error)
})
it('rejects if the stream has already errored', async () => {
const error = new Error()
const stream = makeStream([])
await new Promise(resolve => {
stream.once('error', resolve).destroy(error)
})
assert.strict(await rejectionOf(new StreamReader(stream)[method](10)), error)
})
}
describe('read()', () => {
it('rejects if size is less than or equal to 0', async () => {
const error = await rejectionOf(new StreamReader(makeStream([])).read(0))
assert.strictEqual(error.code, 'ERR_ASSERTION')
})
it('returns null if stream is empty', async () => {
assert.strictEqual(await new StreamReader(makeStream([])).read(), null)
})
makeErrorTests('read')
it('returns null if the stream is already ended', async () => {
const reader = new StreamReader(makeStream([]))
await reader.read()
assert.strictEqual(await reader.read(), null)
})
describe('with binary stream', () => {
it('returns the first chunk of data', async () => {
assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(), Buffer.from('foo'))
})
it('returns a chunk of the specified size (smaller than first)', async () => {
assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(2), Buffer.from('fo'))
})
it('returns a chunk of the specified size (larger than first)', async () => {
assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(4), Buffer.from('foob'))
})
it('returns less data if stream ends', async () => {
assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(10), Buffer.from('foobar'))
})
})
describe('with object stream', () => {
it('returns the first chunk of data verbatim', async () => {
const chunks = [{}, {}]
assert.strictEqual(await new StreamReader(makeStream.obj(chunks)).read(), chunks[0])
})
})
})
describe('readStrict()', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(new StreamReader(makeStream([])).readStrict())
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended without data')
assert.strictEqual(error.chunk, undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(new StreamReader(makeStream(['foo', 'bar'])).readStrict(10))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
assert.deepEqual(error.chunk, Buffer.from('foobar'))
})
})
describe('skip()', function () {
makeErrorTests('skip')
it('returns 0 if size is 0', async () => {
assert.strictEqual(await new StreamReader(makeStream(['foo'])).skip(0), 0)
})
it('returns 0 if the stream is already ended', async () => {
const reader = new StreamReader(makeStream([]))
await reader.read()
assert.strictEqual(await reader.skip(10), 0)
})
it('skips a number of bytes', async () => {
const reader = new StreamReader(makeStream('foo bar'))
assert.strictEqual(await reader.skip(4), 4)
assert.deepEqual(await reader.read(4), Buffer.from('bar'))
})
it('returns less size if stream ends', async () => {
assert.deepEqual(await new StreamReader(makeStream('foo bar')).skip(10), 7)
})
})
describe('skipStrict()', function () {
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(new StreamReader(makeStream('foo bar')).skipStrict(10))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
assert.deepEqual(error.bytesSkipped, 7)
})
})

View File

@@ -1,39 +0,0 @@
{
"private": false,
"name": "@vates/stream-reader",
"description": "Efficiently reads and skips chunks of a given size in a stream",
"keywords": [
"async",
"chunk",
"data",
"node",
"promise",
"read",
"reader",
"skip",
"stream"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/stream-reader",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/stream-reader",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.3.0"
}
}

View File

@@ -1,114 +0,0 @@
```js
import { Task } from '@vates/task'
const task = new Task({
// data in this object will be sent along the *start* event
//
// property names should be chosen so as not to clash with properties used by `Task` or `combineEvents`
data: {
name: 'my task',
},
// if defined, a new detached task is created
//
// if not defined and created inside an existing task, the new task is considered a subtask
onProgress(event) {
// this function is called each time this task or one of its subtasks changes state
const { id, timestamp, type } = event
if (type === 'start') {
const { name, parentId } = event
} else if (type === 'end') {
const { result, status } = event
} else if (type === 'info' || type === 'warning') {
const { data, message } = event
} else if (type === 'property') {
const { name, value } = event
}
},
})
// this field is settable once before being observed
task.id
// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
// - aborted
task.status
// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort, it will be up to the task to handle or not this signal.
task.abort(reason)
// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```
Inside a task:
```js
// the abort signal of the current task if any, otherwise is `undefined`
Task.abortSignal
// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)
// sends a warning on the current task if any, otherwise does nothing
Task.warning(message, data)
// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```
### `combineEvents`
Create a consolidated log from individual events.
It can be used directly as an `onProgress` callback:
```js
import { makeOnProgress } from '@vates/task/combineEvents'
const onProgress = makeOnProgress({
// This function is called each time a root task starts.
//
// It will be called as many times as there are tasks created with this `onProgress` function.
onRootTaskStart(taskLog) {
// `taskLog` is an object reflecting the state of this task and all its subtasks,
// and will be mutated in real-time to reflect the changes of the task.
},
// This function is called each time a root task ends.
onRootTaskEnd(taskLog) {},
// This function is called each time a root task or a subtask is updated.
//
// `taskLog.$root` can be used to unconditionally access the root task.
onTaskUpdate(taskLog) {},
})
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```
It can also be fed event logs directly:
```js
import { makeOnProgress } from '@vates/task/combineEvents'
const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })
eventLogs.forEach(onProgress)
```

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,145 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/task
[![Package Version](https://badgen.net/npm/v/@vates/task)](https://npmjs.org/package/@vates/task) ![License](https://badgen.net/npm/license/@vates/task) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/task)](https://bundlephobia.com/result?p=@vates/task) [![Node compatibility](https://badgen.net/npm/node/@vates/task)](https://npmjs.org/package/@vates/task)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/task):
```sh
npm install --save @vates/task
```
## Usage
```js
import { Task } from '@vates/task'
const task = new Task({
// data in this object will be sent along the *start* event
//
// property names should be chosen so as not to clash with properties used by `Task` or `combineEvents`
data: {
name: 'my task',
},
// if defined, a new detached task is created
//
// if not defined and created inside an existing task, the new task is considered a subtask
onProgress(event) {
// this function is called each time this task or one of its subtasks changes state
const { id, timestamp, type } = event
if (type === 'start') {
const { name, parentId } = event
} else if (type === 'end') {
const { result, status } = event
} else if (type === 'info' || type === 'warning') {
const { data, message } = event
} else if (type === 'property') {
const { name, value } = event
}
},
})
// this field is settable once before being observed
task.id
// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
// - aborted
task.status
// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort, it will be up to the task to handle or not this signal.
task.abort(reason)
// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```
Inside a task:
```js
// the abort signal of the current task if any, otherwise is `undefined`
Task.abortSignal
// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)
// sends a warning on the current task if any, otherwise does nothing
Task.warning(message, data)
// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```
### `combineEvents`
Create a consolidated log from individual events.
It can be used directly as an `onProgress` callback:
```js
import { makeOnProgress } from '@vates/task/combineEvents'
const onProgress = makeOnProgress({
// This function is called each time a root task starts.
//
// It will be called as many times as there are tasks created with this `onProgress` function.
onRootTaskStart(taskLog) {
// `taskLog` is an object reflecting the state of this task and all its subtasks,
// and will be mutated in real-time to reflect the changes of the task.
},
// This function is called each time a root task ends.
onRootTaskEnd(taskLog) {},
// This function is called each time a root task or a subtask is updated.
//
// `taskLog.$root` can be used to unconditionally access the root task.
onTaskUpdate(taskLog) {},
})
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```
It can also be fed event logs directly:
```js
import { makeOnProgress } from '@vates/task/combineEvents'
const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })
eventLogs.forEach(onProgress)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,77 +0,0 @@
'use strict'
const assert = require('node:assert').strict
const noop = Function.prototype
function omit(source, keys, target = { __proto__: null }) {
for (const key of Object.keys(source)) {
if (!keys.has(key)) {
target[key] = source[key]
}
}
return target
}
const IGNORED_START_PROPS = new Set([
'end',
'infos',
'properties',
'result',
'status',
'tasks',
'timestamp',
'type',
'warnings',
])
exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
const taskLogs = new Map()
return function onProgress(event) {
const { id, type } = event
let taskLog
if (type === 'start') {
taskLog = omit(event, IGNORED_START_PROPS)
taskLog.start = event.timestamp
taskLog.status = 'pending'
taskLogs.set(id, taskLog)
const { parentId } = event
if (parentId === undefined) {
Object.defineProperty(taskLog, '$root', { value: taskLog })
// start of a root task
onRootTaskStart(taskLog)
} else {
// start of a subtask
const parent = taskLogs.get(parentId)
assert.notEqual(parent, undefined)
// inject a (non-enumerable) reference to the parent and the root task
Object.defineProperties(taskLog, { $parent: { value: parent }, $root: { value: parent.$root } })
;(parent.tasks ?? (parent.tasks = [])).push(taskLog)
}
} else {
taskLog = taskLogs.get(id)
assert.notEqual(taskLog, undefined)
if (type === 'info' || type === 'warning') {
const key = type + 's'
const { data, message } = event
;(taskLog[key] ?? (taskLog[key] = [])).push({ data, message })
} else if (type === 'property') {
;(taskLog.properties ?? (taskLog.properties = { __proto__: null }))[event.name] = event.value
} else if (type === 'end') {
taskLog.end = event.timestamp
taskLog.result = event.result
taskLog.status = event.status
}
if (type === 'end' && taskLog.$root === taskLog) {
onRootTaskEnd(taskLog)
}
}
onTaskUpdate(taskLog)
}
}
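
For reference, a sketch of the consolidated `taskLog` object this function builds; the field names come from the code above, the values are illustrative:

```js
const exampleTaskLog = {
  id: 'q1e2r3', // copied from the start event
  name: 'my task', // any extra `data` sent with the start event
  start: 1668418471000, // timestamp of the start event
  status: 'success', // 'pending' until the end event arrives
  end: 1668418472000, // set by the end event, along with `result`
  result: undefined,
  properties: { progress: 100 }, // accumulated from 'property' events
  infos: [{ data: {}, message: 'foo' }], // from 'info' events; `warnings` is similar
  tasks: [/* subtask logs of the same shape, linked via $parent/$root */],
}
```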

View File

@@ -1,67 +0,0 @@
'use strict'
const assert = require('node:assert').strict
const { describe, it } = require('test')
const { makeOnProgress } = require('./combineEvents.js')
const { Task } = require('./index.js')
describe('makeOnProgress()', function () {
it('works', async function () {
const events = []
let log
const task = new Task({
data: { name: 'task' },
onProgress: makeOnProgress({
onRootTaskStart(log_) {
assert.equal(log, undefined)
log = log_
events.push('onRootTaskStart')
},
onRootTaskEnd(log_) {
assert.equal(log_, log)
events.push('onRootTaskEnd')
},
onTaskUpdate(log_) {
assert.equal(log_.$root, log)
events.push('onTaskUpdate')
},
}),
})
assert.equal(events.length, 0)
await task.run(async () => {
assert.equal(events[0], 'onRootTaskStart')
assert.equal(events[1], 'onTaskUpdate')
assert.equal(log.name, 'task')
Task.set('progress', 0)
assert.equal(events[2], 'onTaskUpdate')
assert.equal(log.properties.progress, 0)
Task.info('foo', {})
assert.equal(events[3], 'onTaskUpdate')
assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])
await Task.run({ data: { name: 'subtask' } }, () => {
assert.equal(events[4], 'onTaskUpdate')
assert.equal(log.tasks[0].name, 'subtask')
Task.warning('bar', {})
assert.equal(events[5], 'onTaskUpdate')
assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])
})
assert.equal(events[6], 'onTaskUpdate')
assert.equal(log.tasks[0].status, 'success')
Task.set('progress', 100)
assert.equal(events[7], 'onTaskUpdate')
assert.equal(log.properties.progress, 100)
})
assert.equal(events[8], 'onRootTaskEnd')
assert.equal(events[9], 'onTaskUpdate')
assert.equal(log.status, 'success')
})
})

View File

@@ -1,182 +0,0 @@
'use strict'
const assert = require('node:assert').strict
const { AsyncLocalStorage } = require('node:async_hooks')
// define a read-only, non-enumerable, non-configurable property
function define(object, property, value) {
Object.defineProperty(object, property, { value })
}
const noop = Function.prototype
const ABORTED = 'aborted'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { ABORTED, FAILURE, PENDING, SUCCESS }
// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
const asyncStorage = global[asyncStorageKey] ?? (global[asyncStorageKey] = new AsyncLocalStorage())
const getTask = () => asyncStorage.getStore()
exports.Task = class Task {
static get abortSignal() {
const task = getTask()
if (task !== undefined) {
return task.#abortController.signal
}
}
static info(message, data) {
const task = getTask()
if (task !== undefined) {
task.#emit('info', { data, message })
}
}
static run(opts, fn) {
return new this(opts).run(fn)
}
static set(name, value) {
const task = getTask()
if (task !== undefined) {
task.#emit('property', { name, value })
}
}
static warning(message, data) {
const task = getTask()
if (task !== undefined) {
task.#emit('warning', { data, message })
}
}
static wrap(opts, fn) {
// compatibility with @decorateWith
if (typeof fn !== 'function') {
;[fn, opts] = [opts, fn]
}
return function taskRun() {
return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
}
}
#abortController = new AbortController()
#onProgress
get id() {
return (this.id = Math.random().toString(36).slice(2))
}
set id(value) {
define(this, 'id', value)
}
#startData
#status = PENDING
get status() {
return this.#status
}
constructor({ data = {}, onProgress } = {}) {
this.#startData = data
if (onProgress !== undefined) {
this.#onProgress = onProgress
} else {
const parent = getTask()
if (parent !== undefined) {
const { signal } = parent.#abortController
signal.addEventListener('abort', () => {
this.#abortController.abort(signal.reason)
})
this.#onProgress = parent.#onProgress
this.#startData.parentId = parent.id
} else {
this.#onProgress = noop
}
}
const { signal } = this.#abortController
signal.addEventListener('abort', () => {
if (this.status === PENDING && !this.#running) {
this.#maybeStart()
const status = ABORTED
this.#status = status
this.#emit('end', { result: signal.reason, status })
}
})
}
abort(reason) {
this.#abortController.abort(reason)
}
#emit(type, data) {
data.id = this.id
data.timestamp = Date.now()
data.type = type
this.#onProgress(data)
}
#maybeStart() {
const startData = this.#startData
if (startData !== undefined) {
this.#startData = undefined
this.#emit('start', startData)
}
}
async run(fn) {
const result = await this.runInside(fn)
if (this.status === PENDING) {
this.#status = SUCCESS
this.#emit('end', { status: SUCCESS, result })
}
return result
}
#running = false
async runInside(fn) {
assert.equal(this.status, PENDING)
assert.equal(this.#running, false)
this.#running = true
this.#maybeStart()
try {
const result = await asyncStorage.run(this, fn)
this.#running = false
return result
} catch (result) {
const { signal } = this.#abortController
const aborted = signal.aborted && result === signal.reason
const status = aborted ? ABORTED : FAILURE
this.#status = status
this.#emit('end', { status, result })
throw result
}
}
wrap(fn) {
const task = this
return function taskRun() {
return task.run(() => fn.apply(this, arguments))
}
}
wrapInside(fn) {
const task = this
return function taskRunInside() {
return task.runInside(() => fn.apply(this, arguments))
}
}
}
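
A minimal sketch of the `Task.wrap` helper defined above (the wrapped function and its data are made up):

```js
const { Task } = require('./index.js')

const backupVm = Task.wrap({ data: { name: 'backup' } }, async function (vmId) {
  Task.set('progress', 0)
  // ... the actual work would go here ...
  Task.set('progress', 100)
  return vmId
})

// each call runs in its own task, reporting events to its onProgress
backupVm('vm-1').catch(console.error)
```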

View File

@@ -1,341 +0,0 @@
'use strict'
const assert = require('node:assert').strict
const { describe, it } = require('test')
const { Task } = require('./index.js')
const noop = Function.prototype
function assertEvent(task, expected, eventIndex = -1) {
const logs = task.$events
const actual = logs[eventIndex < 0 ? logs.length + eventIndex : eventIndex]
assert.equal(typeof actual, 'object')
assert.equal(typeof actual.id, 'string')
assert.equal(typeof actual.timestamp, 'number')
for (const keys of Object.keys(expected)) {
assert.equal(actual[keys], expected[keys])
}
}
// like new Task() but with a custom onProgress which adds event to task.$events
function createTask(opts) {
const events = []
const task = new Task({ ...opts, onProgress: events.push.bind(events) })
task.$events = events
return task
}
describe('Task', function () {
describe('constructor', function () {
it('data properties are passed to the start event', async function () {
const data = { foo: 0, bar: 1 }
const task = createTask({ data })
await task.run(noop)
assertEvent(task, { ...data, type: 'start' }, 0)
})
})
it('subtasks events are passed to root task', async function () {
const task = createTask()
const result = {}
await task.run(async () => {
await new Task().run(() => result)
})
assert.equal(task.$events.length, 4)
assertEvent(task, { type: 'start', parentId: task.id }, 1)
assertEvent(task, { type: 'end', status: 'success', result }, 2)
})
describe('.abortSignal', function () {
it('is undefined when run outside a task', function () {
assert.equal(Task.abortSignal, undefined)
})
it('is the current abort signal when run inside a task', async function () {
const task = createTask()
await task.run(() => {
const { abortSignal } = Task
assert.equal(abortSignal.aborted, false)
task.abort()
assert.equal(abortSignal.aborted, true)
})
})
})
describe('.abort()', function () {
it('aborts if the task fails with the abort reason', async function () {
const task = createTask()
const reason = {}
await task
.run(() => {
task.abort(reason)
Task.abortSignal.throwIfAborted()
})
.catch(noop)
assert.equal(task.status, 'aborted')
assert.equal(task.$events.length, 2)
assertEvent(task, { type: 'start' }, 0)
assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
})
it('does not abort if the task fails without the abort reason', async function () {
const task = createTask()
const result = new Error()
await task
.run(() => {
task.abort({})
throw result
})
.catch(noop)
assert.equal(task.status, 'failure')
assert.equal(task.$events.length, 2)
assertEvent(task, { type: 'start' }, 0)
assertEvent(task, { type: 'end', status: 'failure', result }, 1)
})
it('does not abort if the task succeed', async function () {
const task = createTask()
const result = {}
await task
.run(() => {
task.abort({})
return result
})
.catch(noop)
assert.equal(task.status, 'success')
assert.equal(task.$events.length, 2)
assertEvent(task, { type: 'start' }, 0)
assertEvent(task, { type: 'end', status: 'success', result }, 1)
})
it('aborts before task is running', function () {
const task = createTask()
const reason = {}
task.abort(reason)
assert.equal(task.status, 'aborted')
assert.equal(task.$events.length, 2)
assertEvent(task, { type: 'start' }, 0)
assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
})
})
describe('.info()', function () {
it('does nothing when run outside a task', function () {
Task.info('foo')
})
it('emits an info message when run inside a task', async function () {
const task = createTask()
await task.run(() => {
Task.info('foo')
assertEvent(task, {
data: undefined,
message: 'foo',
type: 'info',
})
})
})
})
describe('.set()', function () {
it('does nothing when run outside a task', function () {
Task.set('progress', 10)
})
it('emits a property event when run inside a task', async function () {
const task = createTask()
await task.run(() => {
Task.set('progress', 10)
assertEvent(task, {
name: 'progress',
type: 'property',
value: 10,
})
})
})
})
describe('.warning()', function () {
it('does nothing when run outside a task', function () {
Task.warning('foo')
})
it('emits a warning message when run inside a task', async function () {
const task = createTask()
await task.run(() => {
Task.warning('foo')
assertEvent(task, {
data: undefined,
message: 'foo',
type: 'warning',
})
})
})
})
describe('#id', function () {
it('can be set', function () {
const task = createTask()
task.id = 'foo'
assert.equal(task.id, 'foo')
})
it('cannot be set more than once', function () {
const task = createTask()
task.id = 'foo'
assert.throws(() => {
task.id = 'bar'
}, TypeError)
})
it('is randomly generated if not set', function () {
assert.notEqual(createTask().id, createTask().id)
})
it('cannot be set after being observed', function () {
const task = createTask()
noop(task.id)
assert.throws(() => {
task.id = 'bar'
}, TypeError)
})
})
describe('#status', function () {
it('starts as pending', function () {
assert.equal(createTask().status, 'pending')
})
it('changes to success when it finishes without error', async function () {
const task = createTask()
await task.run(noop)
assert.equal(task.status, 'success')
})
it('changes to failure when it finishes with an error', async function () {
const task = createTask()
await task
.run(() => {
throw Error()
})
.catch(noop)
assert.equal(task.status, 'failure')
})
it('changes to aborted after run is complete', async function () {
const task = createTask()
await task
.run(() => {
task.abort()
assert.equal(task.status, 'pending')
Task.abortSignal.throwIfAborted()
})
.catch(noop)
assert.equal(task.status, 'aborted')
})
it('changes to aborted if aborted when not running', async function () {
const task = createTask()
task.abort()
assert.equal(task.status, 'aborted')
})
})
function makeRunTests(run) {
it('starts the task', async function () {
const task = createTask()
await run(task, () => {
assertEvent(task, { type: 'start' })
})
})
it('finishes the task on success', async function () {
const task = createTask()
await run(task, () => 'foo')
assert.equal(task.status, 'success')
assertEvent(task, {
status: 'success',
result: 'foo',
type: 'end',
})
})
it('fails the task on error', async function () {
const task = createTask()
const e = new Error()
await run(task, () => {
throw e
}).catch(noop)
assert.equal(task.status, 'failure')
assertEvent(task, {
status: 'failure',
result: e,
type: 'end',
})
})
}
describe('.run', function () {
makeRunTests((task, fn) => task.run(fn))
})
describe('.wrap', function () {
makeRunTests((task, fn) => task.wrap(fn)())
})
function makeRunInsideTests(run) {
it('starts the task', async function () {
const task = createTask()
await run(task, () => {
assertEvent(task, { type: 'start' })
})
})
it('does not finish the task on success', async function () {
const task = createTask()
await run(task, () => 'foo')
assert.equal(task.status, 'pending')
})
it('fails the task on error', async function () {
const task = createTask()
const e = new Error()
await run(task, () => {
throw e
}).catch(noop)
assert.equal(task.status, 'failure')
assertEvent(task, {
status: 'failure',
result: e,
type: 'end',
})
})
}
describe('.runInside', function () {
makeRunInsideTests((task, fn) => task.runInside(fn))
})
describe('.wrapInside', function () {
makeRunInsideTests((task, fn) => task.wrapInside(fn)())
})
})
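
The test file above doubles as the package's de-facto spec. As a quick orientation, here is a minimal usage sketch assembled only from behaviour the tests pin down (`onProgress` receiving every event, subtask forwarding, `Task.info`, `task.status`); the `demo` data and the logging are illustrative.

```js
'use strict'
const { Task } = require('@vates/task')

// every event (start, info, end, ...) emitted by the task or its subtasks
// is delivered to the onProgress callback
const events = []
const task = new Task({ data: { name: 'demo' }, onProgress: event => events.push(event) })

task
  .run(async () => {
    Task.info('working') // emits an 'info' event on the current task
    await new Task().run(() => 42) // subtask events reach the root onProgress
  })
  .then(() => {
    console.log(task.status) // 'success'
    console.log(events.map(_ => _.type)) // [ 'start', 'info', 'start', 'end', 'end' ]
  })
```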

View File

@@ -1,31 +0,0 @@
{
"private": false,
"name": "@vates/task",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/task",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/task",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.2",
"engines": {
"node": ">=14"
},
"devDependencies": {
"test": "^3.3.0"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"exports": {
".": "./index.js",
"./combineEvents": "./combineEvents.js"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):
```sh
npm install --save @vates/toggle-scripts
```
> npm install --save @vates/toggle-scripts
```
## Usage

View File

@@ -30,7 +30,6 @@ if (args.length === 0) {
${name} v${version}
`)
// eslint-disable-next-line n/no-process-exit
process.exit()
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async-map):
```sh
npm install --save @xen-orchestra/async-map
```
> npm install --save @xen-orchestra/async-map
```
## Usage

View File

@@ -35,7 +35,7 @@
"test": "node--test"
},
"devDependencies": {
"sinon": "^15.0.1",
"sinon": "^14.0.1",
"test": "^3.2.1"
}
}

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/audit-core):
```sh
npm install --save @xen-orchestra/audit-core
```
> npm install --save @xen-orchestra/audit-core
```
## Contributions

View File

@@ -7,7 +7,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.2.3",
"version": "0.2.1",
"engines": {
"node": ">=14"
},
@@ -17,7 +17,7 @@
},
"dependencies": {
"@vates/decorate-with": "^2.0.0",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/log": "^0.4.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
},

View File

@@ -5,6 +5,7 @@ const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
@@ -14,7 +15,7 @@ const configs = {
proposal: 'minimal',
},
'@babel/preset-env': {
debug: __PROD__,
debug: !__TEST__,
// disabled until https://github.com/babel/babel/issues/8323 is resolved
// loose: true,

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups-cli):
```sh
npm install --global @xen-orchestra/backups-cli
```
> npm install --global @xen-orchestra/backups-cli
```
## Usage

View File

@@ -1,10 +1,11 @@
import { readFileSync } from 'fs'
import getopts from 'getopts'
'use strict'
const { version } = JSON.parse(readFileSync(new URL('package.json', import.meta.url)))
const getopts = require('getopts')
export function composeCommands(commands) {
return async function (args, prefix) {
const { version } = require('./package.json')
module.exports = commands =>
async function (args, prefix) {
const opts = getopts(args, {
alias: {
help: 'h',
@@ -29,6 +30,5 @@ xo-backups v${version}
return
}
return (await command.default)(args.slice(1), prefix + ' ' + commandName)
return command.main(args.slice(1), prefix + ' ' + commandName)
}
}
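
For context, a hedged sketch of how this CommonJS composer is consumed: a map of commands, each exposing a lazy `main` getter and a `usage` string. The final call with `process.argv` is an assumption derived from the composer's `(args, prefix)` signature, and the command name and module path are illustrative.

```js
#!/usr/bin/env node
'use strict'

// hypothetical single-command CLI assembled with the composer above
require('./_composeCommands')({
  info: {
    // lazy getter: the command module is only loaded when actually invoked
    get main() {
      return require('./commands/info')
    },
    usage: 'xo-vm-backups/*',
  },
})(process.argv.slice(2), 'xo-backups').catch(console.error)
```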

View File

@@ -1,9 +1,11 @@
import fs from 'fs/promises'
import { dirname } from 'path'
'use strict'
export * from 'fs/promises'
const { dirname } = require('path')
export const getSize = path =>
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs
fs.getSize = path =>
fs.stat(path).then(
_ => _.size,
error => {
@@ -14,7 +16,7 @@ export const getSize = path =>
}
)
export async function mktree(path) {
fs.mktree = async function mkdirp(path) {
try {
await fs.mkdir(path)
} catch (error) {
@@ -24,8 +26,8 @@ export async function mktree(path) {
return
}
if (code === 'ENOENT') {
await mktree(dirname(path))
return mktree(path)
await mkdirp(dirname(path))
return mkdirp(path)
}
throw error
}
@@ -35,7 +37,7 @@ export async function mktree(path) {
// - single param for direct use in `Array#map`
// - files are prefixed with directory path
// - safer: returns empty array if path is missing or not a directory
export const readdir2 = path =>
fs.readdir2 = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
@@ -57,7 +59,7 @@ export const readdir2 = path =>
}
)
export async function symlink2(target, path) {
fs.symlink2 = async (target, path) => {
try {
await fs.symlink(target, path)
} catch (error) {

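A short, assumption-laden sketch of the CommonJS helpers above: `mktree` creates missing parents recursively, and `readdir2` returns path-prefixed entries (or an empty array when the directory is missing). The `/tmp` paths are examples only.

```js
'use strict'
const fs = require('./_fs')

async function main() {
  await fs.mktree('/tmp/demo/a/b') // creates every missing parent, like mkdir -p
  const entries = await fs.readdir2('/tmp/demo') // entries are prefixed with the directory path
  console.log(entries) // [ '/tmp/demo/a' ]
  console.log(await fs.readdir2('/tmp/demo/missing')) // [], safer than a rejection
}

main().catch(console.error)
```
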
View File

@@ -0,0 +1,40 @@
'use strict'
// -----------------------------------------------------------------------------
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const getopts = require('getopts')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
const { resolve } = require('path')
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))
module.exports = async function main(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}
})
}
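
For illustration, the command above can also be driven directly, bypassing the CLI front-end; the VM backup directory below is a made-up example and the flags map to the `getopts` aliases declared in the module.

```js
'use strict'
const cleanVms = require('./commands/clean-vms')

// equivalent to: xo-backups clean-vms --fix --remove xo-vm-backups/<vm-uuid>
cleanVms(['--fix', '--remove', 'xo-vm-backups/0e2f-example']).catch(console.error)
```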

View File

@@ -1,38 +0,0 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'
import Disposable from 'promise-toolbox/Disposable'
import { pathToFileURL } from 'url'
export default async function cleanVms(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
await asyncMap(_, vmDir =>
Disposable.use(getSyncedHandler({ url: pathToFileURL(dirname(vmDir)).href }), async handler => {
try {
await new RemoteAdapter(handler).cleanVm(basename(vmDir), {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}
})
)
}

View File

@@ -1,10 +1,13 @@
import { mktree, readdir2, readFile, symlink2 } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import filenamify from 'filenamify'
import get from 'lodash/get.js'
import { dirname, join, relative } from 'path'
'use strict'
export default async function createSymlinkIndex([backupDir, fieldPath]) {
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')
const { mktree, readdir2, readFile, symlink2 } = require('../_fs')
module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
const indexDir = join(backupDir, 'indexes', filenamify(fieldPath))
await mktree(indexDir)

View File

@@ -1,13 +1,16 @@
import { readdir2, readFile, getSize } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import { createHash } from 'crypto'
import groupBy from 'lodash/groupBy.js'
import { dirname, resolve } from 'path'
'use strict'
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const { readdir2, readFile, getSize } = require('../_fs')
const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)
export default async function info(vmDirs) {
module.exports = async function info(vmDirs) {
const jsonFiles = (
await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
).flat()

View File

@@ -1,12 +1,11 @@
#!/usr/bin/env node
import { composeCommands } from './_composeCommands.mjs'
const importDefault = async path => (await import(path)).default
'use strict'
composeCommands({
require('./_composeCommands')({
'clean-vms': {
get default() {
return importDefault('./commands/clean-vms.mjs')
get main() {
return require('./commands/clean-vms')
},
usage: `[--fix] [--merge] [--remove] xo-vm-backups/*
@@ -19,14 +18,14 @@ composeCommands({
`,
},
'create-symlink-index': {
get default() {
return importDefault('./commands/create-symlink-index.mjs')
get main() {
return require('./commands/create-symlink-index')
},
usage: 'xo-vm-backups <field path>',
},
info: {
get default() {
return importDefault('./commands/info.mjs')
get main() {
return require('./commands/info')
},
usage: 'xo-vm-backups/*',
},

View File

@@ -1,21 +1,21 @@
{
"private": false,
"bin": {
"xo-backups": "index.mjs"
"xo-backups": "index.js"
},
"preferGlobal": true,
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.36.1",
"@xen-orchestra/fs": "^3.3.4",
"@xen-orchestra/backups": "^0.29.0",
"@xen-orchestra/fs": "^3.2.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0"
},
"engines": {
"node": ">=14"
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "1.0.6",
"version": "0.7.8",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -3,7 +3,6 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
@@ -12,7 +11,6 @@ const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const createStreamThrottle = require('./_createStreamThrottle.js')
const noop = Function.prototype
@@ -27,7 +25,6 @@ const getAdaptersByRemote = adapters => {
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
const DEFAULT_SETTINGS = {
getRemoteTimeout: 300e3,
reportWhen: 'failure',
}
@@ -41,15 +38,13 @@ const DEFAULT_VM_SETTINGS = {
fullInterval: 0,
healthCheckSr: undefined,
healthCheckVmsWithTags: [],
maxExportRate: 0,
maxMergedDeltasPerRun: Infinity,
maxMergedDeltasPerRun: 2,
offlineBackup: false,
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
useNbd: false,
unconditionalSnapshot: false,
validateVhdStreams: false,
vmTimeout: 0,
}
@@ -58,13 +53,6 @@ const DEFAULT_METADATA_SETTINGS = {
retentionXoMetadata: 0,
}
class RemoteTimeoutError extends Error {
constructor(remoteId) {
super('timeout while getting the remote ' + remoteId)
this.remoteId = remoteId
}
}
exports.Backup = class Backup {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
@@ -72,6 +60,13 @@ exports.Backup = class Backup {
this._job = job
this._schedule = schedule
this._getAdapter = Disposable.factory(function* (remoteId) {
return {
adapter: yield getAdapter(remoteId),
remoteId,
}
})
this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
'{job.name}': job.name,
'{vm.name_label}': vm => vm.name_label,
@@ -92,27 +87,6 @@ exports.Backup = class Backup {
this._baseSettings = baseSettings
this._settings = { ...baseSettings, ...job.settings[schedule.id] }
const { getRemoteTimeout } = this._settings
this._getAdapter = async function (remoteId) {
try {
const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))
return new Disposable(() => disposable.dispose(), {
adapter: disposable.value,
remoteId,
})
} catch (error) {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id: remoteId },
},
() => Promise.reject(error)
)
}
}
}
async _runMetadataBackup() {
@@ -158,7 +132,20 @@ exports.Backup = class Backup {
})
)
),
Disposable.all(remoteIds.map(id => this._getAdapter(id))),
Disposable.all(
remoteIds.map(id =>
this._getAdapter(id).catch(error => {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id },
},
() => Promise.reject(error)
)
})
)
),
async (pools, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
@@ -229,11 +216,9 @@ exports.Backup = class Backup {
// FIXME: proper SimpleIdPattern handling
const getSnapshotNameLabel = this._getSnapshotNameLabel
const schedule = this._schedule
const settings = this._settings
const throttleStream = createStreamThrottle(settings.maxExportRate)
const config = this._config
const settings = this._settings
await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -248,7 +233,19 @@ exports.Backup = class Backup {
})
)
),
Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
Disposable.all(
extractIdsFromSimplePattern(job.remotes).map(id =>
this._getAdapter(id).catch(error => {
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id },
},
() => Promise.reject(error)
)
})
)
),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
async (srs, remoteAdapters, healthCheckSr) => {
// remove adapters that failed (already handled)
@@ -270,35 +267,23 @@ exports.Backup = class Backup {
const allSettings = this._job.settings
const baseSettings = this._baseSettings
const handleVm = vmUuid => {
const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }
return this._getRecord('VM', vmUuid).then(
disposableVm =>
Disposable.use(disposableVm, vm => {
taskStart.data.name_label = vm.name_label
return runTask(taskStart, () =>
new VmBackup({
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
schedule,
settings: { ...settings, ...allSettings[vm.uuid] },
srs,
throttleStream,
vm,
}).run()
)
}),
error =>
runTask(taskStart, () => {
throw error
})
const handleVm = vmUuid =>
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
Disposable.use(this._getRecord('VM', vmUuid), vm =>
new VmBackup({
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
schedule,
settings: { ...settings, ...allSettings[vm.uuid] },
srs,
vm,
}).run()
)
)
}
const { concurrency } = settings
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
}

View File

@@ -3,14 +3,12 @@
const { Task } = require('./Task')
exports.HealthCheckVmBackup = class HealthCheckVmBackup {
#restoredVm
#timeout
#xapi
#restoredVm
constructor({ restoredVm, timeout = 10 * 60 * 1000, xapi }) {
constructor({ restoredVm, xapi }) {
this.#restoredVm = restoredVm
this.#xapi = xapi
this.#timeout = timeout
}
async run() {
@@ -25,12 +23,7 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
// remove vifs
await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
const waitForScript = restoredVm.tags.includes('xo-backup-health-check-xenstore')
if (waitForScript) {
await restoredVm.set_xenstore_data({
'vm-data/xo-backup-health-check': 'planned',
})
}
const start = new Date()
// start Vm
@@ -41,7 +34,7 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
false // Skip pre-boot checks?
)
const started = new Date()
const timeout = this.#timeout
const timeout = 10 * 60 * 1000
const startDuration = started - start
let remainingTimeout = timeout - startDuration
@@ -59,52 +52,12 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
remainingTimeout -= running - started
if (remainingTimeout < 0) {
throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} second`)
throw new Error(`local xapi did not get Runnig state for VM ${restoredId} after ${timeout / 1000} second`)
}
// wait for the guest tool version to be defined
await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
timeout: remainingTimeout,
})
const guestToolsReady = new Date()
remainingTimeout -= guestToolsReady - running
if (remainingTimeout < 0) {
throw new Error(`local xapi did not get the guest tools check ${restoredId} after ${timeout / 1000} second`)
}
if (waitForScript) {
const startedRestoredVm = await xapi.waitObjectState(
restoredVm.$ref,
vm =>
vm?.xenstore_data !== undefined &&
(vm.xenstore_data['vm-data/xo-backup-health-check'] === 'success' ||
vm.xenstore_data['vm-data/xo-backup-health-check'] === 'failure'),
{
timeout: remainingTimeout,
}
)
const scriptOk = new Date()
remainingTimeout -= scriptOk - guestToolsReady
if (remainingTimeout < 0) {
throw new Error(
`Backup health check script did not update vm-data/xo-backup-health-check of ${restoredId} after ${
timeout / 1000
} second, got ${
startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check']
} instead of 'success' or 'failure'`
)
}
if (startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check'] !== 'success') {
const message = startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check-error']
if (message) {
throw new Error(`Backup health check script failed with message ${message} for VM ${restoredId} `)
} else {
throw new Error(`Backup health check script failed for VM ${restoredId} `)
}
}
Task.info('Backup health check script successfully executed')
}
}
)
}
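
As a reference point, a minimal sketch of driving this class, matching the `{ restoredVm, xapi }` constructor kept by this hunk; `xapi` is assumed to be a connected xen-api client and `restoredVm` the record of an already-restored VM.

```js
'use strict'
const { HealthCheckVmBackup } = require('./HealthCheckVmBackup')

// sketch: removes the restored VM's VIFs, starts it, then waits, within the
// fixed 10 minute budget, for the Running state and for the guest tools
async function healthCheck(xapi, restoredVm) {
  await new HealthCheckVmBackup({ restoredVm, xapi }).run()
}
```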

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups):
```sh
npm install --save @xen-orchestra/backups
```
> npm install --save @xen-orchestra/backups
```
## Contributions

View File

@@ -28,7 +28,6 @@ const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
@@ -209,8 +208,8 @@ class RemoteAdapter {
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.useVhdDirectory()
? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.#useVhdDirectory()
})
}
@@ -233,23 +232,21 @@ class RemoteAdapter {
return promise
}
async #removeVmBackupsFromCache(backups) {
await asyncEach(
Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
),
([dir, filenames]) =>
// will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
)
#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}
async deleteDeltaVmBackups(backups) {
@@ -258,7 +255,7 @@ class RemoteAdapter {
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
await this.#removeVmBackupsFromCache(backups)
this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -287,7 +284,7 @@ class RemoteAdapter {
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
await this.#removeVmBackupsFromCache(backups)
this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -321,12 +318,12 @@ class RemoteAdapter {
return this._vhdDirectoryCompression
}
useVhdDirectory() {
#useVhdDirectory() {
return this.handler.useVhdDirectory()
}
#useAlias() {
return this.useVhdDirectory()
return this.#useVhdDirectory()
}
async *#getDiskLegacy(diskId) {
@@ -511,7 +508,7 @@ class RemoteAdapter {
return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
async _readCache(path) {
async #readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
@@ -524,15 +521,15 @@ class RemoteAdapter {
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
const cache = await this._readCache(path)
const cache = await this.#readCache(path)
if (cache !== undefined) {
fn(cache)
await this._writeCache(path, cache)
await this.#writeCache(path, cache)
}
}
async _writeCache(path, data) {
async #writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
@@ -540,6 +537,10 @@ class RemoteAdapter {
}
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
@@ -580,7 +581,7 @@ class RemoteAdapter {
async _readCacheListVmBackups(vmUuid) {
const path = this.#getVmBackupsCache(vmUuid)
const cache = await this._readCache(path)
const cache = await this.#readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
@@ -593,7 +594,7 @@ class RemoteAdapter {
}
// detached async action, will not reject
this._writeCache(path, backups)
this.#writeCache(path, backups)
return backups
}
@@ -644,7 +645,7 @@ class RemoteAdapter {
})
// will not throw
await this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,
@@ -658,27 +659,26 @@ class RemoteAdapter {
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
const handler = this._handler
if (this.useVhdDirectory()) {
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
return size
} else {
return this.outputStream(path, input, { checksum, validator })
await this.outputStream(path, input, { checksum, validator })
}
}
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
const container = watchStreamSize(input)
await this._handler.outputStream(path, input, {
checksum,
dirMode: this._dirMode,
@@ -687,7 +687,6 @@ class RemoteAdapter {
return validator.apply(this, arguments)
},
})
return container.size
}
// open the hierarchy of ancestors until we find a full one
@@ -719,7 +718,7 @@ class RemoteAdapter {
async readDeltaVmBackup(metadata, ignoredVdis) {
const handler = this._handler
const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
const { vbds, vhds, vifs, vm } = metadata
const dir = dirname(metadata._filename)
const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))
@@ -734,7 +733,7 @@ class RemoteAdapter {
vdis,
version: '1.0.0',
vifs,
vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
vm,
}
}
@@ -746,49 +745,7 @@ class RemoteAdapter {
// _filename is a private field used to compute the backup id
//
// it's enumerable to make it cacheable
const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
// backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
if (typeof metadata.vm.is_a_template === 'number') {
const properties = {
vbds: ['bootable', 'unpluggable', 'storage_lock', 'empty', 'currently_attached'],
vdis: [
'sharable',
'read_only',
'storage_lock',
'managed',
'missing',
'is_a_snapshot',
'allow_caching',
'metadata_latest',
],
vifs: ['currently_attached', 'MAC_autogenerated'],
vm: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_a_snapshot', 'is_snapshot_from_vmpp'],
vmSnapshot: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_snapshot_from_vmpp'],
}
function fixBooleans(obj, properties) {
properties.forEach(property => {
if (typeof obj[property] === 'number') {
obj[property] = obj[property] === 1
}
})
}
for (const [key, propertiesInKey] of Object.entries(properties)) {
const value = metadata[key]
if (value !== undefined) {
// some properties of the metadata are collections indexed by the opaqueRef
const isCollection = Object.keys(value).some(subKey => subKey.startsWith('OpaqueRef:'))
if (isCollection) {
Object.values(value).forEach(subValue => fixBooleans(subValue, propertiesInKey))
} else {
fixBooleans(value, propertiesInKey)
}
}
}
}
return metadata
return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
}
}
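
The boolean fix-up dropped by this hunk is easiest to see in isolation. Below is a standalone reproduction of its `fixBooleans` helper with made-up input; it shows how backups produced through XML-RPC on XenServer < 7.1 (booleans encoded as 0/1) get normalized.

```js
'use strict'

// copied from the removed code path above: coerce 0/1 back into booleans
function fixBooleans(obj, properties) {
  properties.forEach(property => {
    if (typeof obj[property] === 'number') {
      obj[property] = obj[property] === 1
    }
  })
}

const vm = { is_a_template: 0, is_a_snapshot: 1 }
fixBooleans(vm, ['is_a_template', 'is_a_snapshot'])
console.log(vm) // { is_a_template: false, is_a_snapshot: true }
```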

View File

@@ -100,7 +100,7 @@ class Task {
* In case of error, the task will be failed.
*
* @typedef Result
* @param {() => Result} fn
* @param {() => Result)} fn
* @param {boolean} last - Whether the task should succeed if there is no error
* @returns Result
*/

View File

@@ -6,13 +6,11 @@ const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { pipeline } = require('node:stream')
const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
@@ -46,8 +44,6 @@ const forkDeltaExport = deltaExport =>
},
})
const noop = Function.prototype
class VmBackup {
constructor({
config,
@@ -59,7 +55,6 @@ class VmBackup {
schedule,
settings,
srs,
throttleStream,
vm,
}) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
@@ -87,7 +82,6 @@ class VmBackup {
this._healthCheckSr = healthCheckSr
this._jobId = job.id
this._jobSnapshots = undefined
this._throttleStream = throttleStream
this._xapi = vm.$xapi
// Base VM for the export
@@ -249,25 +243,14 @@ class VmBackup {
const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
fullVdisRequired,
})
// since NBD is network based, if one disk uses NBD, all the disks use it,
// except the suspended VDI
if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
Task.info('Transfer data using NBD')
}
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
if (this._settings.validateVhdStreams) {
deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
}
deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)
const timestamp = Date.now()
await this._callWriters(
writer =>
writer.transfer({
deltaExport: this._writers.size > 1 ? forkDeltaExport(deltaExport) : deltaExport,
deltaExport: forkDeltaExport(deltaExport),
sizeContainers,
timestamp,
}),
@@ -302,12 +285,10 @@ class VmBackup {
async _copyFull() {
const { compression } = this.job
const stream = this._throttleStream(
await this._xapi.VM_export(this.exportedVm.$ref, {
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
)
const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
const sizeContainer = watchStreamSize(stream)
const timestamp = Date.now()

View File

@@ -1,8 +1,8 @@
'use strict'
const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')
require('@xen-orchestra/log/configure').catchGlobalErrors(logger)
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
require('@xen-orchestra/log').createLogger('xo:backups:worker')
)
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()
@@ -20,8 +20,6 @@ const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')
const { debug } = logger
class BackupWorker {
#config
#job
@@ -124,11 +122,6 @@ decorateMethodsWith(BackupWorker, {
]),
})
const emitMessage = message => {
debug('message emitted', { message })
process.send(message)
}
// Received message:
//
// Message {
@@ -146,8 +139,6 @@ const emitMessage = message => {
// result?: any
// }
process.on('message', async message => {
debug('message received', { message })
if (message.action === 'run') {
const backupWorker = new BackupWorker(message.data)
try {
@@ -156,7 +147,7 @@ process.on('message', async message => {
{
name: 'backup run',
onLog: data =>
emitMessage({
process.send({
data,
type: 'log',
}),
@@ -165,13 +156,13 @@ process.on('message', async message => {
)
: await backupWorker.run()
emitMessage({
process.send({
type: 'result',
result,
status: 'success',
})
} catch (error) {
emitMessage({
process.send({
type: 'result',
result: error,
status: 'failure',

View File

@@ -3,7 +3,7 @@
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, openVhd, VhdAbstract, VhdFile, BrokenVhdError } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
@@ -242,7 +242,7 @@ exports.cleanVm = async function cleanVm(
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
if (error?.code === 'ERR_ASSERTION' && remove) {
if (error instanceof BrokenVhdError && remove) {
logInfo('deleting broken VHD', { path })
return VhdAbstract.unlink(handler, path)
}
@@ -311,6 +311,7 @@ exports.cleanVm = async function cleanVm(
}
const jsons = new Set()
let mustInvalidateCache = false
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -326,20 +327,6 @@ exports.cleanVm = async function cleanVm(
}
})
const cachePath = vmDir + '/cache.json.gz'
let mustRegenerateCache
{
const cache = await this._readCache(cachePath)
const actual = cache === undefined ? 0 : Object.keys(cache).length
const expected = jsons.size
mustRegenerateCache = actual !== expected
if (mustRegenerateCache) {
logWarn('unexpected number of entries in backup cache', { path: cachePath, actual, expected })
}
}
await asyncMap(xvas, async path => {
// the check is not good enough to delete the file, so the best we can do is
// report it
@@ -351,8 +338,6 @@ exports.cleanVm = async function cleanVm(
const unusedVhds = new Set(vhds)
const unusedXvas = new Set(xvas)
const backups = new Map()
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
@@ -365,16 +350,19 @@ exports.cleanVm = async function cleanVm(
return
}
let isBackupComplete
const { mode } = metadata
if (mode === 'full') {
const linkedXva = resolve('/', vmDir, metadata.xva)
isBackupComplete = xvas.has(linkedXva)
if (isBackupComplete) {
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
if (remove) {
logInfo('deleting incomplete backup', { path: json })
jsons.delete(json)
mustInvalidateCache = true
await handler.unlink(json)
}
}
} else if (mode === 'delta') {
const linkedVhds = (() => {
@@ -383,28 +371,22 @@ exports.cleanVm = async function cleanVm(
})()
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
isBackupComplete = missingVhds.length === 0
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (isBackupComplete) {
if (missingVhds.length === 0) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
linkedVhds.forEach(path => {
vhdsToJSons[path] = json
})
} else {
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
}
}
if (isBackupComplete) {
backups.set(json, metadata)
} else {
jsons.delete(json)
if (remove) {
logInfo('deleting incomplete backup', { backup: json })
mustRegenerateCache = true
await handler.unlink(json)
if (remove) {
logInfo('deleting incomplete backup', { path: json })
mustInvalidateCache = true
jsons.delete(json)
await handler.unlink(json)
}
}
}
})
@@ -514,7 +496,7 @@ exports.cleanVm = async function cleanVm(
// for the others, check that the size recorded in metadata matches the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = backups.get(metadataPath)
const metadata = JSON.parse(await handler.readFile(metadataPath))
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
@@ -541,8 +523,7 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
console.warn('cleanVm: incorrect backup size in metadata', {
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
@@ -557,7 +538,6 @@ exports.cleanVm = async function cleanVm(
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
mustRegenerateCache = true
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
@@ -566,16 +546,9 @@ exports.cleanVm = async function cleanVm(
}
})
if (mustRegenerateCache) {
const cache = {}
for (const [path, content] of backups.entries()) {
cache[path] = {
_filename: path,
id: path,
...content,
}
}
await this._writeCache(cachePath, cache)
// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
await handler.unlink(vmDir + '/cache.json.gz')
}
return {

View File

@@ -31,7 +31,7 @@ beforeEach(async () => {
})
afterEach(async () => {
await rimraf(tempDir)
await pFromCallback(cb => rimraf(tempDir, cb))
await handler.forget()
})
@@ -221,7 +221,7 @@ test('it merges delta of non destroyed chain', async () => {
loggued.push(message)
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued[0], `unexpected number of entries in backup cache`)
assert.equal(loggued[0], `incorrect backup size in metadata`)
loggued = []
await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
@@ -378,19 +378,7 @@ describe('tests multiple combination ', () => {
],
})
)
if (!useAlias && vhdMode === 'directory') {
try {
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
} catch (err) {
assert.strictEqual(
err.code,
'NOT_SUPPORTED',
'Merging directory without alias should raise a not supported error'
)
return
}
assert.strictEqual(true, false, 'Merging directory without alias should raise an error')
}
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))

View File

@@ -1,17 +0,0 @@
'use strict'
const { pipeline } = require('node:stream')
const { ThrottleGroup } = require('@kldzj/stream-throttle')
const identity = require('lodash/identity.js')
const noop = Function.prototype
module.exports = function createStreamThrottle(rate) {
if (rate === 0) {
return identity
}
const group = new ThrottleGroup({ rate })
return function throttleStream(stream) {
return pipeline(stream, group.createThrottle(), noop)
}
}
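
A sketch of how this removed module is used elsewhere in the diff (`_VmBackup.js` wraps its export streams with it). The file path is illustrative, and the rate unit, bytes per second, follows the throttle library's default.

```js
'use strict'
const { createReadStream } = require('node:fs')
const createStreamThrottle = require('./_createStreamThrottle')

const throttleStream = createStreamThrottle(1024 * 1024) // ~1 MiB/s; a rate of 0 returns streams untouched
const limited = throttleStream(createReadStream('/tmp/export.xva'))
limited.pipe(process.stdout)
```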

View File

@@ -12,7 +12,7 @@ const { defer } = require('golike-defer')
const { cancelableMap } = require('./_cancelableMap.js')
const { Task } = require('./Task.js')
const pick = require('lodash/pick.js')
const { pick } = require('lodash')
const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
@@ -187,11 +187,11 @@ exports.importDeltaVm = defer(async function importDeltaVm(
// 0. Create suspend_VDI
let suspendVdi
if (vmRecord.suspend_VDI !== undefined && vmRecord.suspend_VDI !== 'OpaqueRef:NULL') {
if (vmRecord.power_state === 'Suspended') {
const vdi = vdiRecords[vmRecord.suspend_VDI]
if (vdi === undefined) {
Task.warning('Suspend VDI not available for this suspended VM', {
vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
vm: pick(vmRecord, 'uuid', 'name_label'),
})
} else {
suspendVdi = await xapi.getRecord(
@@ -258,9 +258,6 @@ exports.importDeltaVm = defer(async function importDeltaVm(
$defer.onFailure(() => newVdi.$destroy())
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
if (vdi.virtual_size > newVdi.virtual_size) {
await newVdi.$callAsync('resize', vdi.virtual_size)
}
} else if (vdiRef === vmRecord.suspend_VDI) {
// suspendVdi has already been created
newVdi = suspendVdi

View File

@@ -1,6 +1,7 @@
'use strict'
const { finished, PassThrough } = require('node:stream')
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
@@ -8,29 +9,29 @@ const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStr
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
const { forks = 0 } = source
source.forks = forks + 1
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: source.forks })
debug('forking', { forks: stream.forks })
const fork = new PassThrough()
source.pipe(fork)
finished(source, { writable: false }, error => {
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
fork.destroy(error)
proxy.destroy(error)
}
})
finished(fork, { readable: false }, error => {
debug('end of stream, unpiping', { error, forks: --source.forks })
eos(proxy, error => {
debug('end of stream, unpiping', { error, forks: --stream.forks })
source.unpipe(fork)
stream.unpipe(proxy)
if (source.forks === 0) {
if (stream.forks === 0) {
debug('no more forks, destroying original stream')
source.destroy(new Error('no more consumers for this stream'))
stream.destroy(new Error('no more consumers for this stream'))
}
})
return fork
return proxy
}
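
A minimal usage sketch, assuming the module path; it exercises the reference counting described in the comment above: each call registers one consumer, and once every fork has ended the source itself is destroyed.

```js
'use strict'
const { PassThrough } = require('node:stream')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')

const source = new PassThrough()

// two independent consumers of the same source
forkStreamUnpipe(source).pipe(process.stdout)
forkStreamUnpipe(source).resume() // second fork, discards its copy

source.end('hello\n') // both forks end, then the source is destroyed
```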

View File

@@ -94,13 +94,13 @@ In case any incoherence is detected, the file is deleted so it will be fully gen
job.start(data: { mode: Mode, reportWhen: ReportWhen })
├─ task.info(message: 'vms', data: { vms: string[] })
├─ task.warning(message: string)
├─ task.start(data: { type: 'VM', id: string, name_label?: string })
├─ task.start(data: { type: 'VM', id: string })
│ ├─ task.warning(message: string)
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ ├─ task.start(message: 'snapshot')
│ │ └─ task.end
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, name_label?: string, isFull: boolean })
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
│ │ ├─ task.warning(message: string)
│ │ ├─ task.start(message: 'transfer')
│ │ │ ├─ task.warning(message: string)

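Purely as an illustration of consuming this tree, here is a sketch of a log formatter, assuming the flat event shape exercised by the @vates/task tests earlier in this diff (`id`, `parentId`, `type`, `message`, `status`); anything beyond those fields is a guess.

```js
'use strict'

// renders one line per event; indentation follows the parentId chain
const indents = new Map()

function formatEvent(event) {
  const indent = event.parentId === undefined ? '' : (indents.get(event.parentId) ?? '') + '  '
  indents.set(event.id, indent)
  if (event.type === 'start') return `${indent}▶ start`
  if (event.type === 'end') return `${indent}■ ${event.status}`
  return `${indent}${event.type}: ${event.message}`
}
```
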
View File

@@ -1,35 +0,0 @@
#!/bin/sh
# This script must be executed at the start of the machine.
#
# It must run as root to be able to use xenstore-read and xenstore-write
# fail in case of error or undefined variable
set -eu
# stop there if a health check is not in progress
if [ "$(xenstore-read vm-data/xo-backup-health-check 2>&1)" != planned ]
then
exit
fi
# not necessary, but informs XO that this script has started which helps diagnose issues
xenstore-write vm-data/xo-backup-health-check running
# put your test here
#
# in this example, the command `sqlite3` is used to validate the health of a database
# and its output is captured and passed to XO via the XenStore in case of error
if output=$(sqlite3 ~/my-database.sqlite3 .table 2>&1)
then
# inform XO everything is ok
xenstore-write vm-data/xo-backup-health-check success
else
# inform XO there is an issue
xenstore-write vm-data/xo-backup-health-check failure
# more info about the issue can be written to `vm-data/xo-backup-health-check-error`
#
# it will be shown in XO
xenstore-write vm-data/xo-backup-health-check-error "$output"
fi

View File

@@ -4,7 +4,7 @@
'use strict'
const { catchGlobalErrors } = require('@xen-orchestra/log/configure')
const { catchGlobalErrors } = require('@xen-orchestra/log/configure.js')
const { createLogger } = require('@xen-orchestra/log')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { join } = require('path')

View File

@@ -8,51 +8,51 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.36.1",
"version": "0.29.0",
"engines": {
"node": ">=14.6"
},
"scripts": {
"postversion": "npm publish --access public",
"test-integration": "node--test *.integ.js"
"test": "node--test"
},
"dependencies": {
"@kldzj/stream-throttle": "^1.1.1",
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.4",
"@vates/disposable": "^0.1.2",
"@vates/fuse-vhd": "^1.0.0",
"@vates/nbd-client": "^1.2.0",
"@vates/nbd-client": "*",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.3.4",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/fs": "^3.2.0",
"@xen-orchestra/log": "^0.4.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^5.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"fs-extra": "^11.1.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
"golike-defer": "^0.5.1",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^2.0.0",
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^9.0.0",
"vhd-lib": "^4.4.0",
"vhd-lib": "^4.1.1",
"yazl": "^2.5.1"
},
"devDependencies": {
"rimraf": "^4.1.1",
"sinon": "^15.0.1",
"rimraf": "^3.0.2",
"sinon": "^14.0.1",
"test": "^3.2.1",
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^2.2.0"
"@xen-orchestra/xapi": "^1.5.2"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -12,7 +12,7 @@ exports.runBackupWorker = function runBackupWorker(params, onLog) {
return new Promise((resolve, reject) => {
const worker = fork(PATH)
worker.on('exit', (code, signal) => reject(new Error(`worker exited with code ${code} and signal ${signal}`)))
worker.on('exit', code => reject(new Error(`worker exited with code ${code}`)))
worker.on('error', reject)
worker.on('message', message => {

View File

@@ -7,12 +7,11 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { dirname } = require('path')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { Task } = require('../Task.js')
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
@@ -20,16 +19,18 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const NbdClient = require('@vates/nbd-client')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
const { debug, warn } = createLogger('xo:backups:DeltaBackupWriter')
class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
async checkBaseVdis(baseUuidToSrcVdi) {
const { handler } = this._adapter
const backup = this._backup
const adapter = this._adapter
const vdisDir = `${this._vmBackupDir}/vdis/${backup.job.id}`
const backupDir = getVmBackupDir(backup.vm.uuid)
const vdisDir = `${backupDir}/vdis/${backup.job.id}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
@@ -134,7 +135,7 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
}
}
async _transfer($defer, { timestamp, deltaExport }) {
async _transfer({ timestamp, deltaExport, sizeContainers }) {
const adapter = this._adapter
const backup = this._backup
@@ -142,6 +143,7 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
const jobId = job.id
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -173,10 +175,9 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
}
const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await Promise.all(
map(deltaExport.vdis, async (vdi, id) => {
const path = `${this._vmBackupDir}/${vhds[id]}`
const path = `${backupDir}/${vhds[id]}`
const isDelta = vdi.other_config['xo:base_delta'] !== undefined
let parentPath
@@ -199,12 +200,30 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
await checkVhd(handler, parentPath)
}
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
if (this._backup.config.useNbd) {
// get nbd if possible
try {
// this will always take the first host in the list
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
debug(`got nbd connection `, { vdi: vdi.uuid })
} catch (error) {
nbdClient = undefined
debug(`can't connect to nbd server or no server available`, { error })
}
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
@@ -219,7 +238,9 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
})
})
)
return { size: transferSize }
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
metadataContent.size = size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
@@ -227,6 +248,3 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
// TODO: run cleanup?
}
}
exports.DeltaBackupWriter = decorateClass(DeltaBackupWriter, {
_transfer: defer,
})
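
The per-VDI NBD probe added in this hunk is worth isolating. Below is a hedged sketch that mirrors the names used above (`VDI.get_nbd_info`, `@vates/nbd-client`); the helper name and its standalone form are mine, not the codebase's.

```js
'use strict'
const NbdClient = require('@vates/nbd-client')

// try to open an NBD connection for one VDI; on any failure fall back to
// the plain export stream by returning undefined
async function tryGetNbdClient(xapi, vdiRef) {
  try {
    // this will always take the first host in the list, as noted above
    const [nbdInfo] = await xapi.call('VDI.get_nbd_info', vdiRef)
    const client = new NbdClient(nbdInfo)
    await client.connect()
    return client // passed through to adapter.writeVhd({ ..., nbdClient })
  } catch (error) {
    return undefined
  }
}
```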

View File

@@ -45,13 +45,11 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
data: {
id: this._sr.uuid,
isFull,
name_label: this._sr.name_label,
type: 'SR',
},
})
this.transfer = task.wrapFn(this.transfer)
this.cleanup = task.wrapFn(this.cleanup)
this.healthCheck = task.wrapFn(this.healthCheck, true)
this.cleanup = task.wrapFn(this.cleanup, true)
return task.run(() => this._prepare())
}
@@ -82,7 +80,6 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
}
async _transfer({ timestamp, deltaExport, sizeContainers }) {
const { _warmMigration } = this._settings
const sr = this._sr
const { job, scheduleId, vm } = this._backup
@@ -95,7 +92,7 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
__proto__: deltaExport,
vm: {
...deltaExport.vm,
tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
tags: [...deltaExport.vm.tags, 'Continuous Replication'],
},
},
sr
@@ -104,13 +101,11 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
this._targetVmRef = targetVmRef
const targetVm = await xapi.getRecord('VM', targetVmRef)
await Promise.all([
// warm migration does not disable HA , since the goal is to start the new VM in production
!_warmMigration &&
targetVm.ha_restart_priority !== '' &&
targetVm.ha_restart_priority !== '' &&
Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
asyncMap(['start', 'start_on'], op =>

View File

@@ -2,6 +2,7 @@
const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { Task } = require('../Task.js')
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
@@ -33,6 +34,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -45,8 +47,9 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const basename = formatFilenameDate(timestamp)
const dataBasename = basename + '.xva'
const dataFilename = this._vmBackupDir + '/' + dataBasename
const dataFilename = backupDir + '/' + dataBasename
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,

View File

@@ -21,7 +21,6 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
name: 'export',
data: {
id: props.sr.uuid,
name_label: this._sr.name_label,
type: 'SR',
// necessary?
@@ -47,7 +46,7 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
const { deleteFirst, _warmMigration } = settings
const { deleteFirst } = settings
if (deleteFirst) {
await deleteOldBackups()
}
@@ -56,18 +55,14 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await xapi.VM_import(stream, sr.$ref, vm =>
Promise.all([
!_warmMigration && vm.add_tags('Disaster Recovery'),
// warm migration does not disable HA , since the goal is to start the new VM in production
!_warmMigration &&
vm.ha_restart_priority !== '' &&
Promise.all([vm.set_ha_restart_priority(''), vm.add_tags('HA disabled')]),
vm.add_tags('Disaster Recovery'),
vm.ha_restart_priority !== '' && Promise.all([vm.set_ha_restart_priority(''), vm.add_tags('HA disabled')]),
vm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
])
)
return { size: sizeContainer.size }
})
this._targetVmRef = targetVmRef
const targetVm = await xapi.getRecord('VM', targetVmRef)
await Promise.all([

View File

@@ -16,6 +16,7 @@ const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
exports.MixinBackupWriter = (BaseClass = Object) =>
class MixinBackupWriter extends BaseClass {
#lock
#vmBackupDir
constructor({ remoteId, ...rest }) {
super(rest)
@@ -23,13 +24,13 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
this._adapter = rest.backup.remoteAdapters[remoteId]
this._remoteId = remoteId
this._vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
this.#vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
}
async _cleanVm(options) {
try {
return await Task.run({ name: 'clean-vm' }, () => {
return this._adapter.cleanVm(this._vmBackupDir, {
return this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
logInfo: info,
@@ -49,7 +50,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
async beforeBackup() {
const { handler } = this._adapter
const vmBackupDir = this._vmBackupDir
const vmBackupDir = this.#vmBackupDir
await handler.mktree(vmBackupDir)
this.#lock = await handler.lock(vmBackupDir)
}
@@ -80,7 +81,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
assert.notStrictEqual(
this._metadataFileName,
undefined,
'Metadata file name should be defined before making a health check'
'Metadata file name should be defined before making a healthcheck'
)
return Task.run(
{
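
The `#vmBackupDir` change above swaps a `_`-prefixed property for a genuinely private class field. A condensed sketch of the surrounding lock pattern, assuming a handler exposing `mktree`/`lock` as in `@xen-orchestra/fs` (the class itself is illustrative):

```js
'use strict'

// Sketch: hard-private backup dir plus a directory lock held for the whole backup.
class ExampleBackupWriter {
  #lock
  #vmBackupDir

  constructor(handler, vmBackupDir) {
    this._handler = handler
    this.#vmBackupDir = vmBackupDir // unreachable from outside, unlike _vmBackupDir
  }

  async beforeBackup() {
    await this._handler.mktree(this.#vmBackupDir) // ensure the directory exists
    this.#lock = await this._handler.lock(this.#vmBackupDir) // serialize writers
  }

  async afterBackup() {
    await this.#lock.dispose() // always release, even after a failed backup
  }
}
```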

View File

@@ -1,17 +1,5 @@
'use strict'
const { Task } = require('../Task')
const assert = require('node:assert/strict')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup')
function extractOpaqueRef(str) {
const OPAQUE_REF_RE = /OpaqueRef:[0-9a-z-]+/
const matches = OPAQUE_REF_RE.exec(str)
if (!matches) {
throw new Error('no opaque ref found')
}
return matches[0]
}
exports.MixinReplicationWriter = (BaseClass = Object) =>
class MixinReplicationWriter extends BaseClass {
constructor({ sr, ...rest }) {
@@ -19,32 +7,4 @@ exports.MixinReplicationWriter = (BaseClass = Object) =>
this._sr = sr
}
healthCheck(sr) {
assert.notEqual(this._targetVmRef, undefined, 'A VM should have been transferred to be health checked')
// copy VM
return Task.run(
{
name: 'health check',
},
async () => {
const { $xapi: xapi } = sr
let clonedVm
try {
const baseVm = xapi.getObject(this._targetVmRef) ?? (await xapi.waitObject(this._targetVmRef))
const clonedRef = await xapi
.callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
.then(extractOpaqueRef)
clonedVm = xapi.getObject(clonedRef) ?? (await xapi.waitObject(clonedRef))
await new HealthCheckVmBackup({
restoredVm: clonedVm,
xapi,
}).run()
} finally {
clonedVm && (await xapi.VM_destroy(clonedVm.$ref))
}
}
)
}
}
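
The removed `healthCheck` above encodes a clone, boot, destroy cycle. As a standalone sketch (the XAPI calls are copied from the diff; the surrounding names are illustrative):

```js
'use strict'

// Sketch of the removed flow: clone the replicated VM, run the health check
// on the clone, and destroy the clone no matter what happened.
const OPAQUE_REF_RE = /OpaqueRef:[0-9a-z-]+/

async function healthCheckReplica(xapi, targetVmRef, HealthCheckVmBackup) {
  const baseVm = xapi.getObject(targetVmRef) ?? (await xapi.waitObject(targetVmRef))
  let clonedVm
  try {
    const result = await xapi.callAsync('VM.clone', targetVmRef, `Health Check - ${baseVm.name_label}`)
    const matches = OPAQUE_REF_RE.exec(result)
    if (!matches) {
      throw new Error('no opaque ref found')
    }
    clonedVm = xapi.getObject(matches[0]) ?? (await xapi.waitObject(matches[0]))
    await new HealthCheckVmBackup({ restoredVm: clonedVm, xapi }).run()
  } finally {
    // the clone is a throwaway: never leave it behind
    clonedVm && (await xapi.VM_destroy(clonedVm.$ref))
  }
}
```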

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/cr-seed-cli):
```sh
npm install --global @xen-orchestra/cr-seed-cli
```
> npm install --global @xen-orchestra/cr-seed-cli
```
## Contributions

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.3.1"
"xen-api": "^1.2.2"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/cron):
```sh
npm install --save @xen-orchestra/cron
```
> npm install --save @xen-orchestra/cron
```
## Usage

View File

@@ -42,7 +42,7 @@
"test": "node--test"
},
"devDependencies": {
"sinon": "^15.0.1",
"sinon": "^14.0.1",
"test": "^3.2.1"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/defined):
```sh
npm install --save @xen-orchestra/defined
```
> npm install --save @xen-orchestra/defined
```
## Contributions

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/emit-async):
```sh
npm install --save @xen-orchestra/emit-async
```
> npm install --save @xen-orchestra/emit-async
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/fs):
```sh
npm install --global @xen-orchestra/fs
```
> npm install --global @xen-orchestra/fs
```
## Contributions

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "3.3.4",
"version": "3.2.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -20,7 +20,6 @@
"node": ">=14.13"
},
"dependencies": {
"@aws-sdk/abort-controller": "^3.272.0",
"@aws-sdk/client-s3": "^3.54.0",
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
@@ -29,13 +28,13 @@
"@vates/async-each": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.1.1",
"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/log": "^0.4.0",
"bind-property-descriptor": "^2.0.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",
"fs-extra": "^11.1.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
@@ -51,9 +50,10 @@
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
"rimraf": "^4.1.1",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
},
"scripts": {

View File

@@ -14,7 +14,7 @@ import { basename, dirname, normalize as normalizePath } from './path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
import { DEFAULT_ENCRYPTION_ALGORITHM, _getEncryptor } from './_encryptor'
const { info, warn } = createLogger('xo:fs:abstract')
const { info, warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
@@ -284,25 +284,15 @@ export default class RemoteHandlerAbstract {
return this._encryptor.decryptData(data)
}
async #rename(oldPath, newPath, { checksum }, createTree = true) {
try {
let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
}
await p
} catch (error) {
// ENOENT can be a missing target directory OR a missing source
if (error.code === 'ENOENT' && createTree) {
await this._mktree(dirname(newPath))
return this.#rename(oldPath, newPath, { checksum }, false)
}
throw error
}
}
async rename(oldPath, newPath, { checksum = false } = {}) {
oldPath = normalizePath(oldPath)
newPath = normalizePath(newPath)
rename(oldPath, newPath, { checksum = false } = {}) {
return this.#rename(normalizePath(oldPath), normalizePath(newPath), { checksum })
let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
}
return p
}
async copy(oldPath, newPath, { checksum = false } = {}) {
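
The `#rename` helper removed above retries once after creating the target's parent tree. The same pattern as a self-contained sketch on plain `node:fs`, for brevity (the real handler is remote-agnostic and also handles checksum files and timeouts):

```js
'use strict'

// Sketch of the lazy-mkdir rename removed above, on the local filesystem.
const { promises: fs } = require('node:fs')
const { dirname } = require('node:path')

async function renameWithTree(oldPath, newPath, createTree = true) {
  try {
    await fs.rename(oldPath, newPath)
  } catch (error) {
    // ENOENT can be a missing target directory OR a missing source: create
    // the tree and retry exactly once to disambiguate
    if (error.code === 'ENOENT' && createTree) {
      await fs.mkdir(dirname(newPath), { recursive: true })
      return renameWithTree(oldPath, newPath, false)
    }
    throw error
  }
}
```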

View File

@@ -116,7 +116,7 @@ describe('encryption', () => {
dir = await pFromCallback(cb => tmp.dir(cb))
})
afterAll(async () => {
await rimraf(dir)
await pFromCallback(cb => rimraf(dir, cb))
})
it('sync should NOT create metadata if missing (not encrypted)', async () => {
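
The two `rimraf` call styles above reflect its API change: v4+ returns a promise, while v3 takes a callback. Side by side, as a sketch:

```js
'use strict'

// rimraf >= 4: promise-based
// await rimraf(dir)

// rimraf 3.x: callback-based, promisified by hand (what pFromCallback does)
const rimraf = require('rimraf')
const removeDir = dir => new Promise((resolve, reject) => rimraf(dir, err => (err ? reject(err) : resolve())))
```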

View File

@@ -1,7 +1,7 @@
import through2 from 'through2'
import { createHash } from 'crypto'
import { defer, fromEvent } from 'promise-toolbox'
import invert from 'lodash/invert.js'
import { invert } from 'lodash'
// Format: $<algorithm>$<salt>$<encrypted>
//
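
The import swap above is behavior-neutral: both forms resolve to the same `invert`. The per-path form avoids loading all of lodash in CommonJS builds, while the named form leans on bundler tree-shaking. For illustration:

```js
import invert from 'lodash/invert.js' // loads only the one module
// import { invert } from 'lodash' // relies on tree-shaking to drop the rest

console.log(invert({ a: '1', b: '2' })) // { '1': 'a', '2': 'b' }
```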

Some files were not shown because too many files have changed in this diff