Compare commits

1 commit — spec/vtpm...feat_more_

| Author | SHA1 | Date |
| --- | --- | --- |
|  | c1282aa999 |  |
@@ -1 +0,0 @@
{ "extends": ["@commitlint/config-conventional"] }
@@ -28,10 +28,8 @@ module.exports = {
       },
     },
     {
-      files: ['*.{spec,test}.{,c,m}js'],
+      files: ['*.spec.{,c,m}js'],
       rules: {
         'n/no-unpublished-require': 'off',
         'n/no-unpublished-import': 'off',
-        'n/no-unsupported-features/node-builtins': [
-          'error',
-          {
8 changes — .github/ISSUE_TEMPLATE/bug_report.md (vendored)

@@ -6,10 +6,7 @@ labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
 assignees: ''
 ---

-1. ⚠️ **If you don't follow this template, the issue will be closed**.
-2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
-
-Are you using XOA or XO from the sources?
+**XOA or XO from the sources?**

 If XOA:

@@ -18,7 +15,6 @@ If XOA:

 If XO from the sources:

-- Provide **your commit number**. If it's older than a week, we won't investigate
 - Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
 - As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)

@@ -42,6 +38,8 @@ If applicable, add screenshots to help explain your problem.
 **Environment (please provide the following information):**

+- Node: [e.g. 16.12.1]
 - xo-server: [e.g. 5.82.3]
 - xo-web: [e.g. 5.87.0]
+- hypervisor: [e.g. XCP-ng 8.2.0]

 **Additional context**
32 changes — .github/workflows/ci.yml (vendored, file removed)

@@ -1,32 +0,0 @@
name: Continous Integration
on: push
jobs:
  CI:
    runs-on: ubuntu-latest
    steps:
      # https://github.com/actions/checkout
      - uses: actions/checkout@v3
      - name: Install packages
        run: |
          sudo apt-get update
          sudo apt-get install -y curl qemu-utils python3-vmdkstream git libxml2-utils libfuse2 nbdkit
      - name: Cache Turbo
        # https://github.com/actions/cache
        uses: actions/cache@v3
        with:
          path: '**/node_modules/.cache/turbo'
          key: ${{ runner.os }}-turbo-cache
      - name: Setup Node environment
        # https://github.com/actions/setup-node
        uses: actions/setup-node@v3
        with:
          node-version: '18'
          cache: 'yarn'
      - name: Install project dependencies
        run: yarn
      - name: Build the project
        run: yarn build
      - name: Lint tests
        run: yarn test-lint
      - name: Integration tests
        run: sudo yarn test-integration
13 changes — .github/workflows/push.yml (vendored, new file)

@@ -0,0 +1,13 @@
name: CI
on: [push]
jobs:
  build:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: satackey/action-docker-layer-caching@v0.0.11
        # Ignore the failure of a step and avoid terminating the job.
        continue-on-error: true
      - run: docker-compose -f docker/docker-compose.dev.yml build
      - run: docker-compose -f docker/docker-compose.dev.yml up
1 change — .gitignore (vendored)

@@ -34,4 +34,3 @@ yarn-error.log.*
 # code coverage
 .nyc_output/
 coverage/
-.turbo/
@@ -1,11 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

# Only check commit message if commit on master or first commit on another
# branch to avoid bothering fix commits after reviews
#
# FIXME: does not properly run with git commit --amend
if [ "$(git rev-parse --abbrev-ref HEAD)" = master ] || [ "$(git rev-list --count master..)" -eq 0 ]
then
  npx --no -- commitlint --edit "$1"
fi
@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

npx lint-staged
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/async-each):

-```sh
-npm install --save @vates/async-each
+```
+> npm install --save @vates/async-each
 ```

 ## Usage
@@ -1,8 +1,6 @@
 'use strict'

-const { describe, it, beforeEach } = require('test')
-const assert = require('assert').strict
-const { spy } = require('sinon')
+/* eslint-env jest */

 const { asyncEach } = require('./')

@@ -36,18 +34,12 @@ describe('asyncEach', () => {
   })

   it('works', async () => {
-    const iteratee = spy(async () => {})
+    const iteratee = jest.fn(async () => {})

     await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })

-    assert.deepStrictEqual(
-      iteratee.thisValues,
-      Array.from(values, () => thisArg)
-    )
-    assert.deepStrictEqual(
-      iteratee.args,
-      Array.from(values, (value, index) => [value, index, iterable])
-    )
+    expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
+    expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
   })
   ;[1, 2, 4].forEach(concurrency => {
     it('respects a concurrency of ' + concurrency, async () => {

@@ -57,7 +49,7 @@ describe('asyncEach', () => {
       values,
       async () => {
         ++running
-        assert.deepStrictEqual(running <= concurrency, true)
+        expect(running).toBeLessThanOrEqual(concurrency)
         await randomDelay()
         --running
       },

@@ -67,52 +59,42 @@ describe('asyncEach', () => {
   })

   it('stops on first error when stopOnError is true', async () => {
-    const tracker = new assert.CallTracker()
-
     const error = new Error()
-    const iteratee = tracker.calls((_, i) => {
+    const iteratee = jest.fn((_, i) => {
       if (i === 1) {
         throw error
       }
-    }, 2)
-    assert.deepStrictEqual(
-      await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true })),
-      error
-    )
-    tracker.verify()
+    })
+
+    expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
+    expect(iteratee).toHaveBeenCalledTimes(2)
   })

   it('rejects AggregateError when stopOnError is false', async () => {
     const errors = []
-    const iteratee = spy(() => {
+    const iteratee = jest.fn(() => {
       const error = new Error()
       errors.push(error)
       throw error
     })

     const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
-    assert.deepStrictEqual(error.errors, errors)
-    assert.deepStrictEqual(
-      iteratee.args,
-      Array.from(values, (value, index) => [value, index, iterable])
-    )
+    expect(error.errors).toEqual(errors)
+    expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
   })

   it('can be interrupted with an AbortSignal', async () => {
-    const tracker = new assert.CallTracker()
-
     const ac = new AbortController()
-    const iteratee = tracker.calls((_, i) => {
+    const iteratee = jest.fn((_, i) => {
       if (i === 1) {
         ac.abort()
       }
-    }, 2)
-    await assert.rejects(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal }), {
-      message: 'asyncEach aborted',
-    })
-    tracker.verify()
+    })
+
+    await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
+      'asyncEach aborted'
+    )
+    expect(iteratee).toHaveBeenCalledTimes(2)
   })
 })
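For review convenience, here is a minimal sketch of the API these tests exercise; the iteratee signature and option names come straight from the calls in the spec above, while the item values are illustrative:

```js
const { asyncEach } = require('@vates/async-each')

async function main() {
  const ac = new AbortController()

  // iteratee receives (value, index, iterable); the options bound parallelism,
  // stop at the first error, and allow cancellation via an AbortSignal
  await asyncEach(
    ['foo', 'bar', 'baz'],
    async (value, index, iterable) => {
      /* per-item async work */
    },
    { concurrency: 2, stopOnError: true, signal: ac.signal }
  )
}
main()
```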
@@ -29,12 +29,6 @@
     "node": ">=8.10"
   },
   "scripts": {
-    "postversion": "npm publish --access public",
-    "test": "node--test"
-  },
-  "devDependencies": {
-    "sinon": "^15.0.1",
-    "tap": "^16.3.0",
-    "test": "^3.2.1"
+    "postversion": "npm publish --access public"
   }
 }
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/cached-dns.lookup):

-```sh
-npm install --save @vates/cached-dns.lookup
+```
+> npm install --save @vates/cached-dns.lookup
 ```

 ## Usage
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-calls):

-```sh
-npm install --save @vates/coalesce-calls
+```
+> npm install --save @vates/coalesce-calls
 ```

 ## Usage
@@ -1,7 +1,6 @@
 'use strict'

-const { describe, it } = require('test')
-const assert = require('assert')
+/* eslint-env jest */

 const { coalesceCalls } = require('./')

@@ -24,13 +23,13 @@ describe('coalesceCalls', () => {
     const promise2 = fn(defer2.promise)

     defer1.resolve('foo')
-    assert.strictEqual(await promise1, 'foo')
-    assert.strictEqual(await promise2, 'foo')
+    expect(await promise1).toBe('foo')
+    expect(await promise2).toBe('foo')

     const defer3 = pDefer()
     const promise3 = fn(defer3.promise)

     defer3.resolve('bar')
-    assert.strictEqual(await promise3, 'bar')
+    expect(await promise3).toBe('bar')
   })
 })
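The behaviour this spec pins down, sketched outside the test harness (the wrapped function body is illustrative): while a wrapped call is in flight, concurrent callers are coalesced onto the same pending promise instead of triggering a second invocation.

```js
const { coalesceCalls } = require('@vates/coalesce-calls')

const refresh = coalesceCalls(async function () {
  // placeholder for expensive work (network call, cache rebuild, …)
})

const p1 = refresh()
const p2 = refresh() // coalesced: resolves with the result of the call behind p1
```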
@@ -30,10 +30,6 @@
     "node": ">=8.10"
   },
   "scripts": {
-    "postversion": "npm publish --access public",
-    "test": "node--test"
-  },
-  "devDependencies": {
-    "test": "^3.2.1"
+    "postversion": "npm publish --access public"
   }
 }
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/compose):

-```sh
-npm install --save @vates/compose
+```
+> npm install --save @vates/compose
 ```

 ## Usage
@@ -1,7 +1,6 @@
 'use strict'

-const { describe, it } = require('test')
-const assert = require('node:assert').strict
+/* eslint-env jest */

 const { compose } = require('./')

@@ -10,42 +9,43 @@ const mul3 = x => x * 3

 describe('compose()', () => {
   it('throws when no functions is passed', () => {
-    assert.throws(() => compose(), TypeError)
-    assert.throws(() => compose([]), TypeError)
+    expect(() => compose()).toThrow(TypeError)
+    expect(() => compose([])).toThrow(TypeError)
   })

   it('applies from left to right', () => {
-    assert.strictEqual(compose(add2, mul3)(5), 21)
+    expect(compose(add2, mul3)(5)).toBe(21)
   })

   it('accepts functions in an array', () => {
-    assert.strictEqual(compose([add2, mul3])(5), 21)
+    expect(compose([add2, mul3])(5)).toBe(21)
   })

   it('can apply from right to left', () => {
-    assert.strictEqual(compose({ right: true }, add2, mul3)(5), 17)
+    expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
   })

   it('accepts options with functions in an array', () => {
-    assert.strictEqual(compose({ right: true }, [add2, mul3])(5), 17)
+    expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
   })

   it('can compose async functions', async () => {
-    assert.strictEqual(
+    expect(
       await compose(
         { async: true },
         async x => x + 2,
         async x => x * 3
-      )(5),
-      21
-    )
+      )(5)
+    ).toBe(21)
   })

   it('forwards all args to first function', () => {
+    expect.assertions(1)
+
     const expectedArgs = [Math.random(), Math.random()]
     compose(
       (...args) => {
-        assert.deepEqual(args, expectedArgs)
+        expect(args).toEqual(expectedArgs)
       },
       // add a second function to avoid the one function special case
       Function.prototype

@@ -53,13 +53,15 @@ describe('compose()', () => {
   })

   it('forwards context to all functions', () => {
+    expect.assertions(2)
+
     const expectedThis = {}
     compose(
       function () {
-        assert.strictEqual(this, expectedThis)
+        expect(this).toBe(expectedThis)
       },
       function () {
-        assert.strictEqual(this, expectedThis)
+        expect(this).toBe(expectedThis)
       }
     ).call(expectedThis)
   })
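Condensed from the assertions above, the composition rules of `@vates/compose` (values and helper functions taken directly from the spec):

```js
const { compose } = require('@vates/compose')

const add2 = x => x + 2
const mul3 = x => x * 3

compose(add2, mul3)(5) // 21 — applies left to right: (5 + 2) * 3
compose([add2, mul3])(5) // 21 — functions may also be passed in an array
compose({ right: true }, add2, mul3)(5) // 17 — right to left: 5 * 3 + 2

// with { async: true }, the composed function returns a promise
compose({ async: true }, async x => x + 2, async x => x * 3)(5).then(console.log) // 21
```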
@@ -19,10 +19,6 @@
     "node": ">=7.6"
   },
   "scripts": {
-    "postversion": "npm publish --access public",
-    "test": "node--test"
-  },
-  "devDependencies": {
-    "test": "^3.2.1"
+    "postversion": "npm publish --access public"
   }
 }
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/decorate-with):

-```sh
-npm install --save @vates/decorate-with
+```
+> npm install --save @vates/decorate-with
 ```

 ## Usage
@@ -1,7 +1,7 @@
 'use strict'

 const assert = require('assert')
-const { describe, it } = require('test')
+const { describe, it } = require('tap').mocha

 const { decorateClass, decorateWith, decorateMethodsWith, perInstance } = require('./')
@@ -26,9 +26,9 @@
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test": "node--test"
+    "test": "tap"
   },
   "devDependencies": {
-    "test": "^3.2.1"
+    "tap": "^16.0.1"
   }
 }
@@ -1,32 +0,0 @@
```js
import diff from '@vates/diff'

diff('foo bar baz', 'Foo qux')
// → [ 0, 'F', 4, 'qux', 7, '' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `F`
// - at position 4, it contains `qux`
// - at position 7, it ends

diff('Foo qux', 'foo bar baz')
// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains f`
// - at position 4, it contains `bar`
// - at position 7, it contains `baz`

// works with all collections that supports
// - `.length`
// - `collection[index]`
// - `.slice(start, end)`
//
// which includes:
// - arrays
// - strings
// - `Buffer`
// - `TypedArray`
diff([0, 1, 2], [3, 4])
// → [ 0, [ 3, 4 ], 2, [] ]
```
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,65 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/diff

[](https://npmjs.org/package/@vates/diff)  [](https://bundlephobia.com/result?p=@vates/diff) [](https://npmjs.org/package/@vates/diff)

> Computes differences between two arrays, buffers or strings

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/diff):

```sh
npm install --save @vates/diff
```

## Usage

```js
import diff from '@vates/diff'

diff('foo bar baz', 'Foo qux')
// → [ 0, 'F', 4, 'qux', 7, '' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains `F`
// - at position 4, it contains `qux`
// - at position 7, it ends

diff('Foo qux', 'foo bar baz')
// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
//
// Differences of the second string from the first one:
// - at position 0, it contains f`
// - at position 4, it contains `bar`
// - at position 7, it contains `baz`

// works with all collections that supports
// - `.length`
// - `collection[index]`
// - `.slice(start, end)`
//
// which includes:
// - arrays
// - strings
// - `Buffer`
// - `TypedArray`
diff([0, 1, 2], [3, 4])
// → [ 0, [ 3, 4 ], 2, [] ]
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,37 +0,0 @@
'use strict'

/**
 * Compare two data arrays, buffers or strings and invoke the provided callback function for each difference.
 *
 * @template {Array|Buffer|string} T
 * @param {Array|Buffer|string} data1 - The first data array or buffer to compare.
 * @param {T} data2 - The second data array or buffer to compare.
 * @param {(index: number, diff: T) => void} [cb] - The callback function to invoke for each difference. If not provided, an array of differences will be returned.
 * @returns {Array<number|T>|undefined} - An array of differences if no callback is provided, otherwise undefined.
 */
module.exports = function diff(data1, data2, cb) {
  let result
  if (cb === undefined) {
    result = []
    cb = result.push.bind(result)
  }

  const n1 = data1.length
  const n2 = data2.length
  const n = Math.min(n1, n2)
  for (let i = 0; i < n; ++i) {
    if (data1[i] !== data2[i]) {
      let j = i + 1
      while (j < n && data1[j] !== data2[j]) {
        ++j
      }
      cb(i, data2.slice(i, j))
      i = j
    }
  }
  if (n1 !== n2) {
    cb(n, n1 < n2 ? data2.slice(n) : data2.slice(0, 0))
  }

  return result
}
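Since the whole implementation is removed above, a compact reminder of its contract may help review; both call styles below are taken from the USAGE.md and the spec shown in this diff:

```js
const diff = require('./index.js')

// without a callback, returns a flat [index, difference, ...] array
diff('foo bar baz', 'baz bar foo') // → [ 0, 'baz', 8, 'foo' ]

// with a callback, invokes it once per difference and returns undefined
diff('foo bar baz', 'baz bar foo', (index, changed) => {
  console.log(index, changed) // 0 'baz', then 8 'foo'
})
```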
@@ -1,51 +0,0 @@
'use strict'

const assert = require('node:assert/strict')
const test = require('test')

const diff = require('./index.js')

test('data of equal length', function () {
  const data1 = 'foo bar baz'
  const data2 = 'baz bar foo'
  assert.deepEqual(diff(data1, data2), [0, 'baz', 8, 'foo'])
})

test('data1 is longer', function () {
  const data1 = 'foo bar'
  const data2 = 'foo'
  assert.deepEqual(diff(data1, data2), [3, ''])
})

test('data2 is longer', function () {
  const data1 = 'foo'
  const data2 = 'foo bar'
  assert.deepEqual(diff(data1, data2), [3, ' bar'])
})

test('with arrays', function () {
  const data1 = 'foo bar baz'.split('')
  const data2 = 'baz bar foo'.split('')
  assert.deepEqual(diff(data1, data2), [0, 'baz'.split(''), 8, 'foo'.split('')])
})

test('with buffers', function () {
  const data1 = Buffer.from('foo bar baz')
  const data2 = Buffer.from('baz bar foo')
  assert.deepEqual(diff(data1, data2), [0, Buffer.from('baz'), 8, Buffer.from('foo')])
})

test('cb param', function () {
  const data1 = 'foo bar baz'
  const data2 = 'baz bar foo'

  const calls = []
  const cb = (...args) => calls.push(args)

  diff(data1, data2, cb)

  assert.deepEqual(calls, [
    [0, 'baz'],
    [8, 'foo'],
  ])
})
@@ -1,36 +0,0 @@
{
  "private": false,
  "name": "@vates/diff",
  "description": "Computes differences between two arrays, buffers or strings",
  "keywords": [
    "array",
    "binary",
    "buffer",
    "diff",
    "differences",
    "string"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/diff",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/diff",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "devDependencies": {
    "test": "^3.3.0"
  }
}
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/disposable):

-```sh
-npm install --save @vates/disposable
+```
+> npm install --save @vates/disposable
 ```

 ## Usage
@@ -1,17 +1,16 @@
 'use strict'

-const { describe, it } = require('test')
-const { useFakeTimers, spy, assert } = require('sinon')
+/* eslint-env jest */

 const { createDebounceResource } = require('./debounceResource')

-const clock = useFakeTimers()
+jest.useFakeTimers()

 describe('debounceResource()', () => {
   it('calls the resource disposer after 10 seconds', async () => {
     const debounceResource = createDebounceResource()
     const delay = 10e3
-    const dispose = spy()
+    const dispose = jest.fn()

     const resource = await debounceResource(
       Promise.resolve({

@@ -23,10 +22,10 @@ describe('debounceResource()', () => {

     resource.dispose()

-    assert.notCalled(dispose)
+    expect(dispose).not.toBeCalled()

-    clock.tick(delay)
+    jest.advanceTimersByTime(delay)

-    assert.called(dispose)
+    expect(dispose).toBeCalled()
   })
 })
@@ -1,14 +1,13 @@
 'use strict'

-const { describe, it } = require('test')
-const { spy, assert } = require('sinon')
+/* eslint-env jest */

 const { deduped } = require('./deduped')

 describe('deduped()', () => {
   it('calls the resource function only once', async () => {
     const value = {}
-    const getResource = spy(async () => ({
+    const getResource = jest.fn(async () => ({
       value,
       dispose: Function.prototype,
     }))

@@ -18,13 +17,13 @@ describe('deduped()', () => {
     const { value: v1 } = await dedupedGetResource()
     const { value: v2 } = await dedupedGetResource()

-    assert.calledOnce(getResource)
-    assert.match(v1, value)
-    assert.match(v2, value)
+    expect(getResource).toHaveBeenCalledTimes(1)
+    expect(v1).toBe(value)
+    expect(v2).toBe(value)
   })

   it('only disposes the source disposable when its all copies dispose', async () => {
-    const dispose = spy()
+    const dispose = jest.fn()
     const getResource = async () => ({
       value: '',
       dispose,

@@ -37,35 +36,35 @@ describe('deduped()', () => {

     d1()

-    assert.notCalled(dispose)
+    expect(dispose).not.toHaveBeenCalled()

     d2()

-    assert.calledOnce(dispose)
+    expect(dispose).toHaveBeenCalledTimes(1)
   })

   it('works with sync factory', () => {
     const value = {}
-    const dispose = spy()
+    const dispose = jest.fn()
     const dedupedGetResource = deduped(() => ({ value, dispose }))

     const d1 = dedupedGetResource()
-    assert.match(d1.value, value)
+    expect(d1.value).toBe(value)

     const d2 = dedupedGetResource()
-    assert.match(d2.value, value)
+    expect(d2.value).toBe(value)

     d1.dispose()

-    assert.notCalled(dispose)
+    expect(dispose).not.toHaveBeenCalled()

     d2.dispose()

-    assert.calledOnce(dispose)
+    expect(dispose).toHaveBeenCalledTimes(1)
   })

   it('no race condition on dispose before async acquisition', async () => {
-    const dispose = spy()
+    const dispose = jest.fn()
     const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))

     const d1 = await dedupedGetResource()

@@ -74,6 +73,6 @@ describe('deduped()', () => {

     d1.dispose()

-    assert.notCalled(dispose)
+    expect(dispose).not.toHaveBeenCalled()
   })
 })
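A sketch (not from the diff) of the semantics these tests pin down; `openConnection` and `close` are hypothetical helpers. Concurrent acquisitions share one underlying resource, and the source `dispose` runs only once the last copy has been disposed.

```js
const { deduped } = require('./deduped')

// hypothetical factory: open one shared connection, close it on dispose
const getConnection = deduped(async () => {
  const connection = await openConnection() // hypothetical helper
  return { value: connection, dispose: () => connection.close() }
})

// inside an async function:
// const a = await getConnection()
// const b = await getConnection() // same underlying connection as `a`
// a.dispose() // underlying dispose not called yet
// b.dispose() // last copy released → connection.close() runs once
```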
@@ -14,22 +14,17 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "0.1.4",
+  "version": "0.1.1",
   "engines": {
     "node": ">=8.10"
   },
   "scripts": {
-    "postversion": "npm publish --access public",
-    "test": "node--test"
+    "postversion": "npm publish --access public"
   },
   "dependencies": {
     "@vates/multi-key-map": "^0.1.0",
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/log": "^0.6.0",
+    "@xen-orchestra/log": "^0.3.0",
     "ensure-array": "^1.0.0"
-  },
-  "devDependencies": {
-    "sinon": "^15.0.1",
-    "test": "^3.2.1"
   }
 }
@@ -8,8 +8,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/event-listeners-manager):

-```sh
-npm install --save @vates/event-listeners-manager
+```
+> npm install --save @vates/event-listeners-manager
 ```

 ## Usage
@@ -4,6 +4,9 @@ const LRU = require('lru-cache')
 const Fuse = require('fuse-native')
 const { VhdSynthetic } = require('vhd-lib')
 const { Disposable, fromCallback } = require('promise-toolbox')
+const { createLogger } = require('@xen-orchestra/log')
+
+const { warn } = createLogger('vates:fuse-vhd')

 // build a s stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
 const stat = st => ({

@@ -54,7 +57,9 @@ exports.mount = Disposable.factory(async function* mount(handler, diskPath, moun
     },
     read(path, fd, buf, len, pos, cb) {
       if (path === '/vhd0') {
-        return vhd.readRawData(pos, len, cache, buf).then(cb)
+        return vhd
+          .readRawData(pos, len, cache, buf)
+          .then(cb)
       }
       throw new Error(`read file ${path} not exists`)
     },

@@ -62,5 +67,5 @@ exports.mount = Disposable.factory(async function* mount(handler, diskPath, moun
   return new Disposable(
     () => fromCallback(() => fuse.unmount()),
     fromCallback(() => fuse.mount())
   )
 )
 })
@@ -1,6 +1,6 @@
 {
   "name": "@vates/fuse-vhd",
-  "version": "1.0.0",
+  "version": "0.0.1",
   "license": "ISC",
   "private": false,
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",

@@ -18,10 +18,11 @@
     "node": ">=10.0"
   },
   "dependencies": {
+    "@xen-orchestra/log": "^0.3.0",
     "fuse-native": "^2.2.6",
     "lru-cache": "^7.14.0",
     "promise-toolbox": "^0.21.0",
-    "vhd-lib": "^4.4.0"
+    "vhd-lib": "^4.0.1"
   },
   "scripts": {
     "postversion": "npm publish --access public"
@@ -10,8 +10,8 @@

 Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):

-```sh
-npm install --save @vates/multi-key-map
+```
+> npm install --save @vates/multi-key-map
 ```

 ## Usage
@@ -1,7 +1,6 @@
 'use strict'

-const { describe, it } = require('test')
-const assert = require('node:assert')
+/* eslint-env jest */

 const { MultiKeyMap } = require('./')

@@ -29,9 +28,9 @@ describe('MultiKeyMap', () => {

     keys.forEach((key, i) => {
       // copy the key to make sure the array itself is not the key
-      assert.strictEqual(map.get(key.slice()), values[i])
+      expect(map.get(key.slice())).toBe(values[i])
       map.delete(key.slice())
-      assert.strictEqual(map.get(key.slice()), undefined)
+      expect(map.get(key.slice())).toBe(undefined)
     })
   })
 })
@@ -23,10 +23,6 @@
     "node": ">=8.10"
   },
   "scripts": {
-    "postversion": "npm publish --access public",
-    "test": "node--test"
-  },
-  "devDependencies": {
-    "test": "^3.2.1"
+    "postversion": "npm publish --access public"
   }
 }
@@ -1,16 +0,0 @@
### `new NdbClient({address, exportname, secure = true, port = 10809})`

create a new nbd client

```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
  address: 'MY_NBD_HOST',
  exportname: 'MY_SECRET_EXPORT',
  cert: 'Server certificate', // optional, will use encrypted link if provided
})

await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,47 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/nbd-client

[](https://npmjs.org/package/@vates/nbd-client)  [](https://bundlephobia.com/result?p=@vates/nbd-client) [](https://npmjs.org/package/@vates/nbd-client)

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/nbd-client):

```sh
npm install --save @vates/nbd-client
```

## Usage

### `new NdbClient({address, exportname, secure = true, port = 10809})`

create a new nbd client

```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
  address: 'MY_NBD_HOST',
  exportname: 'MY_SECRET_EXPORT',
  cert: 'Server certificate', // optional, will use encrypted link if provided
})

await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,42 +0,0 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7

exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5

exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4

exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8

exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1

exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
@@ -1,351 +0,0 @@
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
  INIT_PASSWD,
  NBD_CMD_READ,
  NBD_DEFAULT_BLOCK_SIZE,
  NBD_DEFAULT_PORT,
  NBD_FLAG_FIXED_NEWSTYLE,
  NBD_FLAG_HAS_FLAGS,
  NBD_OPT_EXPORT_NAME,
  NBD_OPT_REPLY_MAGIC,
  NBD_OPT_STARTTLS,
  NBD_REPLY_ACK,
  NBD_REPLY_MAGIC,
  NBD_REQUEST_MAGIC,
  OPTS_MAGIC,
  NBD_CMD_DISC,
} = require('./constants.js')
const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
const { createLogger } = require('@xen-orchestra/log')

const { warn } = createLogger('vates:nbd-client')

// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

module.exports = class NbdClient {
  #serverAddress
  #serverCert
  #serverPort
  #serverSocket

  #exportName
  #exportSize

  #waitBeforeReconnect
  #readAhead
  #readBlockRetries
  #reconnectRetry
  #connectTimeout

  // AFAIK, there is no guaranty the server answers in the same order as the queries
  // so we handle a backlog of command waiting for response and handle concurrency manually

  #waitingForResponse // there is already a listenner waiting for a response
  #nextCommandQueryId = BigInt(0)
  #commandQueryBacklog // map of command waiting for an response queryId => { size/*in byte*/, resolve, reject}
  #connected = false

  #reconnectingPromise
  constructor(
    { address, port = NBD_DEFAULT_PORT, exportname, cert },
    { connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
  ) {
    this.#serverAddress = address
    this.#serverPort = port
    this.#exportName = exportname
    this.#serverCert = cert
    this.#waitBeforeReconnect = waitBeforeReconnect
    this.#readAhead = readAhead
    this.#readBlockRetries = readBlockRetries
    this.#reconnectRetry = reconnectRetry
    this.#connectTimeout = connectTimeout
  }

  get exportSize() {
    return this.#exportSize
  }

  async #tlsConnect() {
    return new Promise((resolve, reject) => {
      this.#serverSocket = connect({
        socket: this.#serverSocket,
        rejectUnauthorized: false,
        cert: this.#serverCert,
      })
      this.#serverSocket.once('error', reject)
      this.#serverSocket.once('secureConnect', () => {
        this.#serverSocket.removeListener('error', reject)
        resolve()
      })
    })
  }

  // mandatory , at least to start the handshake
  async #unsecureConnect() {
    this.#serverSocket = new Socket()
    return new Promise((resolve, reject) => {
      this.#serverSocket.connect(this.#serverPort, this.#serverAddress)
      this.#serverSocket.once('error', reject)
      this.#serverSocket.once('connect', () => {
        this.#serverSocket.removeListener('error', reject)
        resolve()
      })
    })
  }

  async #connect() {
    // first we connect to the server without tls, and then we upgrade the connection
    // to tls during the handshake
    await this.#unsecureConnect()
    await this.#handshake()
    this.#connected = true
    // reset internal state if we reconnected a nbd client
    this.#commandQueryBacklog = new Map()
    this.#waitingForResponse = false
  }
  async connect() {
    return pTimeout.call(this.#connect(), this.#connectTimeout)
  }

  async disconnect() {
    if (!this.#connected) {
      return
    }

    const buffer = Buffer.alloc(28)
    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
    buffer.writeInt16BE(0, 4) // no command flags for a disconnect
    buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
    await this.#write(buffer)
    await this.#serverSocket.destroy()
    this.#serverSocket = undefined
    this.#connected = false
  }

  #clearReconnectPromise = () => {
    this.#reconnectingPromise = undefined
  }

  async #reconnect() {
    await this.disconnect().catch(() => {})
    await pDelay(this.#waitBeforeReconnect) // need to let the xapi clean things on its side
    await this.connect()
  }

  async reconnect() {
    // we need to ensure reconnections do not occur in parallel
    if (this.#reconnectingPromise === undefined) {
      this.#reconnectingPromise = pRetry(() => this.#reconnect(), {
        tries: this.#reconnectRetry,
      })
      this.#reconnectingPromise.then(this.#clearReconnectPromise, this.#clearReconnectPromise)
    }

    return this.#reconnectingPromise
  }

  // we can use individual read/write from the socket here since there is no concurrency
  async #sendOption(option, buffer = Buffer.alloc(0)) {
    await this.#write(OPTS_MAGIC)
    await this.#writeInt32(option)
    await this.#writeInt32(buffer.length)
    await this.#write(buffer)
    assert.strictEqual(await this.#readInt64(), NBD_OPT_REPLY_MAGIC) // magic number everywhere
    assert.strictEqual(await this.#readInt32(), option) // the option passed
    assert.strictEqual(await this.#readInt32(), NBD_REPLY_ACK) // ACK
    const length = await this.#readInt32()
    assert.strictEqual(length, 0) // length
  }

  // we can use individual read/write from the socket here since there is only one handshake at once, no concurrency
  async #handshake() {
    assert((await this.#read(8)).equals(INIT_PASSWD))
    assert((await this.#read(8)).equals(OPTS_MAGIC))
    const flagsBuffer = await this.#read(2)
    const flags = flagsBuffer.readInt16BE(0)
    assert.strictEqual(flags & NBD_FLAG_FIXED_NEWSTYLE, NBD_FLAG_FIXED_NEWSTYLE) // only FIXED_NEWSTYLE one is supported from the server options
    await this.#writeInt32(NBD_FLAG_FIXED_NEWSTYLE) // client also support NBD_FLAG_C_FIXED_NEWSTYLE

    if (this.#serverCert !== undefined) {
      // upgrade socket to TLS if needed
      await this.#sendOption(NBD_OPT_STARTTLS)
      await this.#tlsConnect()
    }

    // send export name we want to access.
    // it's implictly closing the negociation phase.
    await this.#write(OPTS_MAGIC)
    await this.#writeInt32(NBD_OPT_EXPORT_NAME)
    const exportNameBuffer = Buffer.from(this.#exportName)
    await this.#writeInt32(exportNameBuffer.length)
    await this.#write(exportNameBuffer)

    // 8 (export size ) + 2 (flags) + 124 zero = 134
    // must read all to ensure nothing stays in the buffer
    const answer = await this.#read(134)
    this.#exportSize = answer.readBigUInt64BE(0)
    const transmissionFlags = answer.readInt16BE(8)
    assert.strictEqual(transmissionFlags & NBD_FLAG_HAS_FLAGS, NBD_FLAG_HAS_FLAGS, 'NBD_FLAG_HAS_FLAGS') // must always be 1 by the norm

    // note : xapi server always send NBD_FLAG_READ_ONLY (3) as a flag
  }

  #read(length) {
    return readChunkStrict(this.#serverSocket, length)
  }

  #write(buffer) {
    return fromCallback.call(this.#serverSocket, 'write', buffer)
  }

  async #readInt32() {
    const buffer = await this.#read(4)
    return buffer.readInt32BE(0)
  }

  async #readInt64() {
    const buffer = await this.#read(8)
    return buffer.readBigUInt64BE(0)
  }

  #writeInt32(int) {
    const buffer = Buffer.alloc(4)
    buffer.writeInt32BE(int)
    return this.#write(buffer)
  }

  // when one read fail ,stop everything
  async #rejectAll(error) {
    this.#commandQueryBacklog.forEach(({ reject }) => {
      reject(error)
    })
  }

  async #readBlockResponse() {
    // ensure at most one read occur in parallel
    if (this.#waitingForResponse) {
      return
    }
    try {
      this.#waitingForResponse = true
      const magic = await this.#readInt32()

      if (magic !== NBD_REPLY_MAGIC) {
        throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
      }

      const error = await this.#readInt32()
      if (error !== 0) {
        // @todo use error code from constants.mjs
        throw new Error(`GOT ERROR CODE : ${error}`)
      }

      const blockQueryId = await this.#readInt64()
      const query = this.#commandQueryBacklog.get(blockQueryId)
      if (!query) {
        throw new Error(` no query associated with id ${blockQueryId}`)
      }
      this.#commandQueryBacklog.delete(blockQueryId)
      const data = await this.#read(query.size)
      query.resolve(data)
      this.#waitingForResponse = false
      if (this.#commandQueryBacklog.size > 0) {
        // it doesn't throw directly but will throw all relevant promise on failure
        this.#readBlockResponse()
      }
    } catch (error) {
      // reject all the promises
      // we don't need to call readBlockResponse on failure
      // since we will empty the backlog
      await this.#rejectAll(error)
    }
  }

  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    // we don't want to add anything in backlog while reconnecting
    if (this.#reconnectingPromise) {
      await this.#reconnectingPromise
    }

    const queryId = this.#nextCommandQueryId
    this.#nextCommandQueryId++

    // create and send command at once to ensure there is no concurrency issue
    const buffer = Buffer.alloc(28)
    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
    buffer.writeInt16BE(0, 4) // no command flags for a simple block read
    buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
    buffer.writeBigUInt64BE(queryId, 8)
    // byte offset in the raw disk
    buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
    buffer.writeInt32BE(size, 24)

    return new Promise((resolve, reject) => {
      function decoratedReject(error) {
        error.index = index
        error.size = size
        reject(error)
      }

      // this will handle one block response, but it can be another block
      // since server does not guaranty to handle query in order
      this.#commandQueryBacklog.set(queryId, {
        size,
        resolve,
        reject: decoratedReject,
      })
      // really send the command to the server
      this.#write(buffer).catch(decoratedReject)

      // #readBlockResponse never throws directly
      // but if it fails it will reject all the promises in the backlog
      this.#readBlockResponse()
    })
  }

  async *readBlocks(indexGenerator) {
    // default : read all blocks
    if (indexGenerator === undefined) {
      const exportSize = this.#exportSize
      const chunkSize = 2 * 1024 * 1024
      indexGenerator = function* () {
        const nbBlocks = Math.ceil(exportSize / chunkSize)
        for (let index = 0; index < nbBlocks; index++) {
          yield { index, size: chunkSize }
        }
      }
    }
    const readAhead = []
    const readAheadMaxLength = this.#readAhead
    const makeReadBlockPromise = (index, size) => {
      const promise = pRetry(() => this.readBlock(index, size), {
        tries: this.#readBlockRetries,
        onRetry: async err => {
          warn('will retry reading block ', index, err)
          await this.reconnect()
        },
      })
      // error is handled during unshift
      promise.catch(() => {})
      return promise
    }

    // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
    for (const { index, size } of indexGenerator()) {
      // stack readAheadMaxLength promises before starting to handle the results
      if (readAhead.length === readAheadMaxLength) {
        // any error will stop reading blocks
        yield readAhead.shift()
      }

      readAhead.push(makeReadBlockPromise(index, size))
    }
    while (readAhead.length > 0) {
      yield readAhead.shift()
    }
  }
}
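To make the removal above easier to review, here is the client's happy path as documented in the package README earlier in this diff; the host and export names are placeholders:

```js
const NbdClient = require('./index.js')

async function main() {
  const client = new NbdClient({
    address: 'MY_NBD_HOST',
    exportname: 'MY_SECRET_EXPORT',
    cert: 'Server certificate', // optional: triggers the NBD_OPT_STARTTLS upgrade during the handshake
  })

  await client.connect() // plain TCP connect, then handshake (and TLS upgrade if a cert was given)
  const block = await client.readBlock(0, 64 * 1024) // block index, block size in bytes
  console.log(block.length)
  await client.disconnect() // sends NBD_CMD_DISC, then destroys the socket
}
main()
```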
@@ -1,76 +0,0 @@
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')

const FILE_SIZE = 2 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )

  const client = new NbdClient({
    address: 'localhost',
    exportname: 'MY_SECRET_EXPORT',
    secure: false,
  })

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 128 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  // read mutiple blocks in parallel
  await asyncEach(
    indexes,
    async i => {
      const block = await client.readBlock(i, CHUNK_SIZE)
      let blockOk = true
      let firstFail
      for (let j = 0; j < CHUNK_SIZE; j += 4) {
        const wanted = i * CHUNK_SIZE + j
        const found = block.readUInt32BE(j)
        blockOk = blockOk && found === wanted
        if (!blockOk && firstFail === undefined) {
          firstFail = j
        }
      }
      tap.ok(blockOk, `check block ${i} content`)
    },
    { concurrency: 8 }
  )
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@@ -1,36 +0,0 @@
{
  "private": false,
  "name": "@vates/nbd-client",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/nbd-client",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/nbd-client",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "1.2.0",
  "engines": {
    "node": ">=14.0"
  },
  "dependencies": {
    "@vates/async-each": "^1.0.0",
    "@vates/read-chunk": "^1.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.6.0",
    "promise-toolbox": "^0.21.0",
    "xen-api": "^1.3.0"
  },
  "devDependencies": {
    "tap": "^16.3.0",
    "tmp": "^0.2.1"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test-integration": "tap *.spec.js"
  }
}
@@ -1,130 +0,0 @@
### Usual workflow

> This section presents how this library should be used to implement a classic two factor authentification.

#### Setup

```js
import { generateSecret, generateTotp } from '@vates/otp'
import QrCode from 'qrcode'

// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()

// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)

// Generates an URI to present to the user
const uri = generateTotpUri({ secret })

// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```

#### Authentication

```js
import { verifyTotp } from '@vates/otp'

// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
  console.log('authenticated!')
}
```

### API

#### Secret

```js
import { generateSecret } from '@vates/otp'

const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```

#### HOTP

> This is likely not what you want to use, see TOTP below instead.

```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'

// a sequence number, see HOTP specification
const counter = 0

// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'

// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true

// generate a URI than can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```

Optional params and their default values:

- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator

#### TOTP

```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'

// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'

// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true

// generate a URI than can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```

Optional params and their default values:

- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, when this token will be valid, default to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid

#### Verification from URI

```js
import { verifyFromUri } from '@vates/otp'

// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,163 +0,0 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/otp
|
||||
|
||||
[](https://npmjs.org/package/@vates/otp)  [](https://bundlephobia.com/result?p=@vates/otp) [](https://npmjs.org/package/@vates/otp)
|
||||
|
||||
> Minimal HTOP/TOTP implementation
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/otp):
|
||||
|
||||
```sh
|
||||
npm install --save @vates/otp
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Usual workflow
|
||||
|
||||
> This section presents how this library should be used to implement a classic two factor authentification.
|
||||
|
||||
#### Setup
|
||||
|
||||
```js
|
||||
import { generateSecret, generateTotp } from '@vates/otp'
|
||||
import QrCode from 'qrcode'
|
||||
|
||||
// Generates a secret that will be shared by both the service and the user:
|
||||
const secret = generateSecret()
|
||||
|
||||
// Stores the secret in the service:
|
||||
await currentUser.saveOtpSecret(secret)
|
||||
|
||||
// Generates an URI to present to the user
|
||||
const uri = generateTotpUri({ secret })
|
||||
|
||||
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
|
||||
const qr = await QrCode.toDataURL(uri)
|
||||
```
|
||||
|
||||
#### Authentication
|
||||
|
||||
```js
|
||||
import { verifyTotp } from '@vates/otp'
|
||||
|
||||
// Verifies a `token` entered by the user against a `secret` generated during setup.
|
||||
if (await verifyTotp(token, { secret })) {
|
||||
console.log('authenticated!')
|
||||
}
|
||||
```
|
||||
|
||||
### API
|
||||
|
||||
#### Secret
|
||||
|
||||
```js
|
||||
import { generateSecret } from '@vates/otp'
|
||||
|
||||
const secret = generateSecret()
|
||||
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
|
||||
```
|
||||
|
||||
#### HOTP
|
||||
|
||||
> This is likely not what you want to use, see TOTP below instead.
|
||||
|
||||
```js
|
||||
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
|
||||
|
||||
// a sequence number, see HOTP specification
|
||||
const counter = 0
|
||||
|
||||
// generate a token
|
||||
//
|
||||
// optional params:
|
||||
// - digits
|
||||
const token = await generateHotp({ counter, secret })
|
||||
// '239988'
|
||||
|
||||
// verify a token
|
||||
//
|
||||
// optional params:
|
||||
// - digits
|
||||
const isValid = await verifyHotp(token, { counter, secret })
|
||||
// true
|
||||
|
||||
// generate a URI than can be displayed as a QR code to be used with Authy or Google Authenticator
|
||||
//
|
||||
// optional params:
|
||||
// - digits
|
||||
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
|
||||
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
|
||||
```
|
||||
|
||||
Optional params and their default values:
|
||||
|
||||
- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator

#### TOTP

```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'

// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'

// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true

// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```

Optional params and their default values:

- `digits = 6`: length of the token; avoid changing it, as non-default values are not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, at which this token is valid; defaults to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid (see the sketch below)
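
With the defaults, `window = 1` means a token from the previous or next 30-second period is still accepted. A minimal sketch of this tolerance, using only the exports documented above (the secret value is illustrative):

```js
import { generateTotp, verifyTotp } from '@vates/otp'

const secret = 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH' // hypothetical secret

// token generated one period (30 seconds) in the past
const token = await generateTotp({ secret, timestamp: Date.now() / 1e3 - 30 })

// accepted: the default window also checks the previous and next periods
await verifyTotp(token, { secret }) // → true

// rejected: with the tolerance disabled, only the current period matches
await verifyTotp(token, { secret, window: 0 }) // → false
```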

#### Verification from URI

```js
import { verifyFromUri } from '@vates/otp'

// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```
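
Because the URI embeds the secret and its parameters, a service can store the URI alone; a short roundtrip sketch using the documented exports:

```js
import { generateSecret, generateTotp, generateTotpUri, verifyFromUri } from '@vates/otp'

const secret = generateSecret()
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })

// later, verify a token using nothing but the stored URI
const token = await generateTotp({ secret })
await verifyFromUri(token, uri) // → true
```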

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,111 +0,0 @@
import { base32 } from 'rfc4648'
import { webcrypto } from 'node:crypto'

const { subtle } = webcrypto

function assert(name, value) {
  if (!value) {
    throw new TypeError('invalid value for param ' + name)
  }
}

// https://github.com/google/google-authenticator/wiki/Key-Uri-Format
function generateUri(protocol, label, params) {
  assert('label', typeof label === 'string')
  assert('secret', typeof params.secret === 'string')

  let path = encodeURIComponent(label)

  const { issuer } = params
  if (issuer !== undefined) {
    path = encodeURIComponent(issuer) + ':' + path
  }

  const query = Object.entries(params)
    .filter(_ => _[1] !== undefined)
    .map(([key, value]) => key + '=' + encodeURIComponent(value))
    .join('&')

  return `otpauth://${protocol}/${path}?${query}`
}

export function generateSecret() {
  // https://www.rfc-editor.org/rfc/rfc4226 recommends 160 bits (i.e. 20 bytes)
  const data = new Uint8Array(20)
  webcrypto.getRandomValues(data)
  return base32.stringify(data, { pad: false })
}

const DIGITS = 6

// https://www.rfc-editor.org/rfc/rfc4226
export async function generateHotp({ counter, digits = DIGITS, secret }) {
  const data = new Uint8Array(8)
  new DataView(data.buffer).setBigInt64(0, BigInt(counter), false)

  const key = await subtle.importKey(
    'raw',
    base32.parse(secret, { loose: true }),
    { name: 'HMAC', hash: 'SHA-1' },
    false,
    ['sign', 'verify']
  )
  const digest = new DataView(await subtle.sign('HMAC', key, data))

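  // dynamic truncation (RFC 4226 §5.3): the low nibble of the last byte
  // selects a 4-byte window in the digest, whose top bit is masked off
  // before the final modulo reduction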
  const offset = digest.getUint8(digest.byteLength - 1) & 0xf
  const p = digest.getUint32(offset) & 0x7f_ff_ff_ff

  return String(p % Math.pow(10, digits)).padStart(digits, '0')
}

export function generateHotpUri({ counter, digits, issuer, label, secret }) {
  assert('counter', typeof counter === 'number')
  return generateUri('hotp', label, { counter, digits, issuer, secret })
}

export async function verifyHotp(token, opts) {
  return token === (await generateHotp(opts))
}

function totpCounter(period = 30, timestamp = Math.floor(Date.now() / 1e3)) {
  return Math.floor(timestamp / period)
}

// https://www.rfc-editor.org/rfc/rfc6238.html
export async function generateTotp({ period, timestamp, ...opts }) {
  opts.counter = totpCounter(period, timestamp)
  return await generateHotp(opts)
}

export function generateTotpUri({ digits, issuer, label, period, secret }) {
  return generateUri('totp', label, { digits, issuer, period, secret })
}

export async function verifyTotp(token, { period, timestamp, window = 1, ...opts }) {
  const counter = totpCounter(period, timestamp)
  const end = counter + window
  opts.counter = counter - window
  while (opts.counter <= end) {
    if (token === (await generateHotp(opts))) {
      return true
    }
    opts.counter += 1
  }
  return false
}

export async function verifyFromUri(token, uri) {
  const url = new URL(uri)
  assert('protocol', url.protocol === 'otpauth:')

  const { host } = url
  const opts = Object.fromEntries(url.searchParams.entries())
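  // note: values parsed from the URI (counter, digits, period) are strings;
  // the generators coerce them (BigInt, arithmetic), so verification still works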
  if (host === 'hotp') {
    return await verifyHotp(token, opts)
  }
  if (host === 'totp') {
    return await verifyTotp(token, opts)
  }

  assert('host', false)
}
@@ -1,112 +0,0 @@
import { strict as assert } from 'node:assert'
import { describe, it } from 'tap/mocha'

import {
  generateHotp,
  generateHotpUri,
  generateSecret,
  generateTotp,
  generateTotpUri,
  verifyHotp,
  verifyTotp,
} from './index.mjs'

describe('generateSecret', function () {
  it('generates a string of 32 chars', async function () {
    const secret = generateSecret()
    assert.equal(typeof secret, 'string')
    assert.equal(secret.length, 32)
  })

  it('generates a different secret at each call', async function () {
    assert.notEqual(generateSecret(), generateSecret())
  })
})

describe('HOTP', function () {
  it('generates and verifies valid tokens', async function () {
    for (const [token, opts] of Object.entries({
      382752: {
        counter: -3088,
        secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
      },
      163376: {
        counter: 30598,
        secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
      },
    })) {
      assert.equal(await generateHotp(opts), token)
      assert(await verifyHotp(token, opts))
    }
  })

  describe('generateHotpUri', function () {
    const opts = {
      counter: 59732,
      label: 'the label',
      secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
    }

    Object.entries({
      'without optional params': [
        opts,
        'otpauth://hotp/the%20label?counter=59732&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
      'with issuer': [
        { ...opts, issuer: 'the issuer' },
        'otpauth://hotp/the%20issuer:the%20label?counter=59732&issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
      'with digits': [
        { ...opts, digits: 7 },
        'otpauth://hotp/the%20label?counter=59732&digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
    }).forEach(([title, [opts, uri]]) => {
      it(title, async function () {
        assert.strictEqual(generateHotpUri(opts), uri)
      })
    })
  })
})

describe('TOTP', function () {
  Object.entries({
    '033702': {
      secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
      timestamp: 1665416296,
      period: 30,
    },
    107250: {
      secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
      timestamp: 1665416674,
      period: 60,
    },
  }).forEach(([token, opts]) => {
    it('works', async function () {
      assert.equal(await generateTotp(opts), token)
      assert(await verifyTotp(token, opts))
    })
  })

  describe('generateTotpUri', function () {
    const opts = {
      label: 'the label',
      secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
    }

    Object.entries({
      'without optional params': [opts, 'otpauth://totp/the%20label?secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX'],
      'with issuer': [
        { ...opts, issuer: 'the issuer' },
        'otpauth://totp/the%20issuer:the%20label?issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
      'with digits': [
        { ...opts, digits: 7 },
        'otpauth://totp/the%20label?digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
    }).forEach(([title, [opts, uri]]) => {
      it(title, async function () {
        assert.strictEqual(generateTotpUri(opts), uri)
      })
    })
  })
})
@@ -1,39 +0,0 @@
{
  "private": false,
  "name": "@vates/otp",
  "description": "Minimal HOTP/TOTP implementation",
  "keywords": [
    "2fa",
    "authenticator",
    "hotp",
    "otp",
    "totp"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/otp",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "main": "index.mjs",
  "repository": {
    "directory": "@vates/otp",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "1.0.0",
  "engines": {
    "node": ">=15"
  },
  "dependencies": {
    "rfc4648": "^1.5.2"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "tap"
  },
  "devDependencies": {
    "tap": "^16.3.0"
  }
}
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/parse-duration):

```sh
npm install --save @vates/parse-duration
```
> npm install --save @vates/parse-duration
```

## Usage

@@ -1,7 +1,7 @@
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`; this permits the most efficient composition:

```js
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
const compositePredicate = every(undefined, some(predicate2, undefined))

// ends up as

@@ -36,21 +36,6 @@ isBetween3And10(10)
// → false
```

### `not(predicate)`

> Returns a predicate that returns the negation of the given predicate.

```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)

isOdd(1)
// true

isOdd(2)
// false
```

### `some(predicates)`

> Returns a predicate that returns `true` iff some predicate returns `true`.

@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/predicates):

```sh
npm install --save @vates/predicates
```
> npm install --save @vates/predicates
```

## Usage
@@ -19,7 +19,7 @@ npm install --save @vates/predicates
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`; this permits the most efficient composition:

```js
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
const compositePredicate = every(undefined, some(predicate2, undefined))

// ends up as

@@ -54,21 +54,6 @@ isBetween3And10(10)
// → false
```

### `not(predicate)`

> Returns a predicate that returns the negation of the given predicate.

```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)

isOdd(1)
// true

isOdd(2)
// false
```

### `some(predicates)`

> Returns a predicate that returns `true` iff some predicate returns `true`.

@@ -51,22 +51,6 @@ exports.every = function every() {
  }
}

const notPredicateTag = {}
exports.not = function not(predicate) {
  if (isDefinedPredicate(predicate)) {
    if (predicate.tag === notPredicateTag) {
      return predicate.predicate
    }

    function notPredicate() {
      return !predicate.apply(this, arguments)
    }
    notPredicate.predicate = predicate
    notPredicate.tag = notPredicateTag
    return notPredicate
  }
}
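
// note: thanks to the tag above, double negation does not create a new
// wrapper: `not(not(p))` returns the original predicate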

exports.some = function some() {
  const predicates = handleArgs.apply(this, arguments)
  const n = predicates.length

@@ -3,14 +3,20 @@
const assert = require('assert/strict')
const { describe, it } = require('tap').mocha

const { every, not, some } = require('./')
const { every, some } = require('./')

const T = () => true
const F = () => false

const testArgHandling = fn => {
  it('returns undefined if predicate is undefined', () => {
const testArgsHandling = fn => {
  it('returns undefined if all predicates are undefined', () => {
    assert.equal(fn(undefined), undefined)
    assert.equal(fn([undefined]), undefined)
  })

  it('returns the predicate if only a single one is passed', () => {
    assert.equal(fn(undefined, T), T)
    assert.equal(fn([undefined, T]), T)
  })

  it('throws if it receives a non-predicate', () => {
@@ -18,15 +24,6 @@ const testArgHandling = fn => {
    error.value = 3
    assert.throws(() => fn(3), error)
  })
}

const testArgsHandling = fn => {
  testArgHandling(fn)

  it('returns the predicate if only a single one is passed', () => {
    assert.equal(fn(undefined, T), T)
    assert.equal(fn([undefined, T]), T)
  })

  it('forwards this and arguments to predicates', () => {
    const thisArg = 'qux'
@@ -39,21 +36,17 @@ const testArgsHandling = fn => {
  })
}

const runTests = (fn, acceptMultiple, truthTable) =>
const runTests = (fn, truthTable) =>
  it('works', () => {
    truthTable.forEach(([result, ...predicates]) => {
      if (acceptMultiple) {
        assert.equal(fn(predicates)(), result)
      } else {
        assert.equal(predicates.length, 1)
      }
      assert.equal(fn(...predicates)(), result)
      assert.equal(fn(predicates)(), result)
    })
  })

describe('every', () => {
  testArgsHandling(every)
  runTests(every, true, [
  runTests(every, [
    [true, T, T],
    [false, T, F],
    [false, F, T],
@@ -61,22 +54,9 @@ describe('every', () => {
  ])
})

describe('not', () => {
  testArgHandling(not)

  it('returns the original predicate if negated twice', () => {
    assert.equal(not(not(T)), T)
  })

  runTests(not, false, [
    [true, F],
    [false, T],
  ])
})

describe('some', () => {
  testArgsHandling(some)
  runTests(some, true, [
  runTests(some, [
    [true, T, T],
    [true, T, F],
    [true, F, T],

@@ -26,7 +26,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "1.1.0",
  "version": "1.0.0",
  "engines": {
    "node": ">=6"
  },

@@ -24,25 +24,3 @@ import { readChunkStrict } from '@vates/read-chunk'

const chunk = await readChunkStrict(stream, 1024)
```

### `skip(stream, size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
import { skip } from '@vates/read-chunk'

const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
```

### `skipStrict(stream, size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
import { skipStrict } from '@vates/read-chunk'

await skipStrict(stream, 2 * 1024 * 1024 * 1024)
```

@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):

```sh
npm install --save @vates/read-chunk
```
> npm install --save @vates/read-chunk
```

## Usage
@@ -43,28 +43,6 @@ import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```

### `skip(stream, size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
import { skip } from '@vates/read-chunk'

const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
```

### `skipStrict(stream, size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
import { skipStrict } from '@vates/read-chunk'

await skipStrict(stream, 2 * 1024 * 1024 * 1024)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on

@@ -1,36 +1,9 @@
'use strict'

const assert = require('assert')

/**
 * Read a chunk of data from a stream.
 *
 * The returned promise is rejected if there is an error while reading the stream.
 *
 * For streams in object mode, the returned promise resolves to a single object read from the stream.
 *
 * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
 *
 * If `size` bytes are not available to be read, `null` will be returned *unless* the stream has ended, in which case all of the data remaining will be returned.
 *
 * @param {Readable} stream - A readable stream to read from.
 * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
 * @returns {Promise<Buffer|string|unknown|null>} - A Promise that resolves to the read chunk if available, or null if end of stream is reached.
 */
const readChunk = (stream, size) =>
  stream.errored != null
    ? Promise.reject(stream.errored)
    : stream.closed || stream.readableEnded
      ? Promise.resolve(null)
      : size === 0
        ? Promise.resolve(Buffer.alloc(0))
        : new Promise((resolve, reject) => {
            if (size !== undefined) {
              assert(size > 0)

              // per Node documentation:
              // > The size argument must be less than or equal to 1 GiB.
              assert(size < 1073741824)
            }

            function onEnd() {
              resolve(null)
              removeListeners()
@@ -58,21 +31,6 @@ const readChunk = (stream, size) =>
  })
exports.readChunk = readChunk

/**
 * Read a chunk of data from a stream.
 *
 * The returned promise is rejected if there is an error while reading the stream.
 *
 * For streams in object mode, the returned promise resolves to a single object read from the stream.
 *
 * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
 *
 * If `size` bytes are not available to be read, the returned promise is rejected.
 *
 * @param {Readable} stream - A readable stream to read from.
 * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
 * @returns {Promise<Buffer|string|unknown>} - A Promise that resolves to the read chunk.
 */
exports.readChunkStrict = async function readChunkStrict(stream, size) {
  const chunk = await readChunk(stream, size)
  if (chunk === null) {
@@ -80,7 +38,7 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
  }

  if (size !== undefined && chunk.length !== size) {
    const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
    const error = new Error('stream has ended with not enough data')
    Object.defineProperties(error, {
      chunk: {
        value: chunk,
@@ -91,69 +49,3 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {

  return chunk
}

/**
 * Skips a given number of bytes from a readable stream.
 *
 * @param {Readable} stream - A readable stream to skip bytes from.
 * @param {number} size - The number of bytes to skip.
 * @returns {Promise<number>} A Promise that resolves to the number of bytes actually skipped. If the end of the stream is reached before all bytes are skipped, the Promise resolves to the number of bytes that were skipped before the end of the stream was reached. The Promise is rejected if there is an error while reading from the stream.
 */
async function skip(stream, size) {
  return stream.errored != null
    ? Promise.reject(stream.errored)
    : size === 0 || stream.closed || stream.readableEnded
      ? Promise.resolve(0)
      : new Promise((resolve, reject) => {
          let left = size
          function onEnd() {
            resolve(size - left)
            removeListeners()
          }
          function onError(error) {
            reject(error)
            removeListeners()
          }
          function onReadable() {
            const data = stream.read()
            left -= data === null ? 0 : data.length
            if (left > 0) {
              // continue to read
            } else {
              // if more than wanted has been read, push back the rest
              if (left < 0) {
                stream.unshift(data.slice(left))
              }

              resolve(size)
              removeListeners()
            }
          }
          function removeListeners() {
            stream.removeListener('end', onEnd)
            stream.removeListener('error', onError)
            stream.removeListener('readable', onReadable)
          }
          stream.on('end', onEnd)
          stream.on('error', onError)
          stream.on('readable', onReadable)
          onReadable()
        })
}
exports.skip = skip

/**
 * Skips a given number of bytes from a stream.
 *
 * @param {Readable} stream - A readable stream to skip bytes from.
 * @param {number} size - The number of bytes to skip.
 * @returns {Promise<void>} - A Promise that resolves when the exact number of bytes have been skipped. The Promise is rejected if there is an error while reading from the stream or the stream ends before the exact number of bytes have been skipped.
 */
exports.skipStrict = async function skipStrict(stream, size) {
  const bytesSkipped = await skip(stream, size)
  if (bytesSkipped !== size) {
    const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
    error.bytesSkipped = bytesSkipped
    throw error
  }
}

69
@vates/read-chunk/index.spec.js
Normal file
@@ -0,0 +1,69 @@
'use strict'

/* eslint-env jest */

const { Readable } = require('stream')

const { readChunk, readChunkStrict } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

describe('readChunk', () => {
  it('returns null if stream is empty', async () => {
    expect(await readChunk(makeStream([]))).toBe(null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
    })

    it('returns an empty buffer if the specified size is 0', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
    })
  })
})

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

describe('readChunkStrict', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream([])))
    expect(error).toBeInstanceOf(Error)
    expect(error.message).toBe('stream has ended without data')
    expect(error.chunk).toEqual(undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    expect(error).toBeInstanceOf(Error)
    expect(error.message).toBe('stream has ended with not enough data')
    expect(error.chunk).toEqual(Buffer.from('foobar'))
  })
})
@@ -1,147 +0,0 @@
'use strict'

const { describe, it } = require('test')
const assert = require('node:assert').strict

const { Readable } = require('stream')

const { readChunk, readChunkStrict, skip, skipStrict } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = fn => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(fn(stream, 10))
    stream.destroy(error)

    assert.strictEqual(await pError, error)
  })

  // only supported for Node >= 18
  if (process.versions.node.split('.')[0] >= 18) {
    it('rejects if the stream has already errored', async () => {
      const error = new Error()
      const stream = makeStream([])

      await new Promise(resolve => {
        stream.once('error', resolve).destroy(error)
      })

      assert.strictEqual(await rejectionOf(fn(stream, 10)), error)
    })
  }
}

describe('readChunk', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(readChunk(makeStream([]), 0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('rejects if size is greater than or equal to 1 GiB', async () => {
    const error = await rejectionOf(readChunk(makeStream([]), 1024 * 1024 * 1024))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  makeErrorTests(readChunk)

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await readChunk(makeStream([])), null)
  })

  it('returns null if the stream is already ended', async () => {
    const stream = await makeStream([])
    await readChunk(stream)

    assert.strictEqual(await readChunk(stream), null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar'])), Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 2), Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 4), Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      assert.strictEqual(await readChunk(makeStream.obj(chunks)), chunks[0])
    })
  })
})

describe('readChunkStrict', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream([])))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended without data')
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })
})

describe('skip', function () {
  makeErrorTests(skip)

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await skip(makeStream(['foo']), 0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const stream = await makeStream([])
    await readChunk(stream)

    assert.strictEqual(await skip(stream, 10), 0)
  })

  it('skips a number of bytes', async () => {
    const stream = makeStream('foo bar')

    assert.strictEqual(await skip(stream, 4), 4)
    assert.deepEqual(await readChunk(stream, 4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
  })
})

describe('skipStrict', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(skipStrict(makeStream('foo bar'), 10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
})
@@ -19,19 +19,15 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "1.1.1",
  "version": "1.0.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
    "postversion": "npm publish --access public"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "devDependencies": {
    "test": "^3.2.1"
  }
}

@@ -1,42 +0,0 @@
```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()`, but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,75 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/stream-reader

[](https://npmjs.org/package/@vates/stream-reader)  [](https://bundlephobia.com/result?p=@vates/stream-reader) [](https://npmjs.org/package/@vates/stream-reader)

> Efficiently reads and skips chunks of a given size in a stream

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/stream-reader):

```sh
npm install --save @vates/stream-reader
```

## Usage

```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()`, but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```
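
The implementation in `index.js` below also defines `Symbol.asyncIterator`, so a reader can be consumed with `for await`; a small sketch:

```js
const reader = new StreamReader(stream)
for await (const chunk of reader) {
  // chunks are yielded until `.read()` returns null
  console.log(chunk.length)
}
```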

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,123 +0,0 @@
'use strict'

const assert = require('node:assert')
const { finished, Readable } = require('node:stream')

const noop = Function.prototype

// Inspired by https://github.com/nodejs/node/blob/85705a47958c9ae5dbaa1f57456db19bdefdc494/lib/internal/streams/readable.js#L1107
class StreamReader {
  #ended = false
  #error
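  // `#executor` is passed to `new Promise(...)` each time a loop below needs
  // to wait: it captures the promise's `resolve` so the 'readable' and
  // 'finished' listeners can wake whichever read()/skip() is pending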
  #executor = resolve => {
    this.#resolve = resolve
  }
  #stream
  #resolve = noop

  constructor(stream) {
    stream = typeof stream.pipe === 'function' ? stream : Readable.from(stream)

    this.#stream = stream

    stream.on('readable', () => this.#resolve())

    finished(stream, { writable: false }, error => {
      this.#error = error
      this.#ended = true
      this.#resolve()
    })
  }

  async read(size) {
    if (size !== undefined) {
      assert(size > 0)
    }

    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return null
      }

      const value = this.#stream.read(size)
      if (value !== null) {
        return value
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async readStrict(size) {
    const chunk = await this.read(size)
    if (chunk === null) {
      throw new Error('stream has ended without data')
    }

    if (size !== undefined && chunk.length !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
      Object.defineProperties(error, {
        chunk: {
          value: chunk,
        },
      })
      throw error
    }

    return chunk
  }

  async skip(size) {
    if (size === 0) {
      return size
    }

    let toSkip = size
    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return size - toSkip
      }

      const data = this.#stream.read()
      if (data !== null) {
        toSkip -= data.length
        if (toSkip > 0) {
          // continue to read
        } else {
          // if more than wanted has been read, push back the rest
          if (toSkip < 0) {
            this.#stream.unshift(data.slice(toSkip))
          }

          return size
        }
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async skipStrict(size) {
    const bytesSkipped = await this.skip(size)
    if (bytesSkipped !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
      error.bytesSkipped = bytesSkipped
      throw error
    }
  }
}

StreamReader.prototype[Symbol.asyncIterator] = async function* asyncIterator() {
  let chunk
  while ((chunk = await this.read()) !== null) {
    yield chunk
  }
}

module.exports = StreamReader
@@ -1,141 +0,0 @@
'use strict'

const { describe, it } = require('test')
const assert = require('node:assert').strict

const { Readable } = require('stream')

const StreamReader = require('./index.js')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = method => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(new StreamReader(stream)[method](10))
    stream.destroy(error)

    assert.strictEqual(await pError, error)
  })

  it('rejects if the stream has already errored', async () => {
    const error = new Error()
    const stream = makeStream([])

    await new Promise(resolve => {
      stream.once('error', resolve).destroy(error)
    })

    assert.strictEqual(await rejectionOf(new StreamReader(stream)[method](10)), error)
  })
}

describe('read()', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).read(0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await new StreamReader(makeStream([])).read(), null)
  })

  makeErrorTests('read')

  it('returns null if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.read(), null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(), Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(2), Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(4), Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(10), Buffer.from('foobar'))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      assert.strictEqual(await new StreamReader(makeStream.obj(chunks)).read(), chunks[0])
    })
  })
})

describe('readStrict()', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).readStrict())
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended without data')
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream(['foo', 'bar'])).readStrict(10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })
})

describe('skip()', function () {
  makeErrorTests('skip')

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await new StreamReader(makeStream(['foo'])).skip(0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.skip(10), 0)
  })

  it('skips a number of bytes', async () => {
    const reader = new StreamReader(makeStream('foo bar'))

    assert.strictEqual(await reader.skip(4), 4)
    assert.deepEqual(await reader.read(4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await new StreamReader(makeStream('foo bar')).skip(10), 7)
  })
})

describe('skipStrict()', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream('foo bar')).skipStrict(10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
})
@@ -1,39 +0,0 @@
{
  "private": false,
  "name": "@vates/stream-reader",
  "description": "Efficiently reads and skips chunks of a given size in a stream",
  "keywords": [
    "async",
    "chunk",
    "data",
    "node",
    "promise",
    "read",
    "reader",
    "skip",
    "stream"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/stream-reader",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/stream-reader",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=10"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "devDependencies": {
    "test": "^3.3.0"
  }
}
@@ -1,114 +0,0 @@
```js
import { Task } from '@vates/task'

const task = new Task({
  // data in this object will be sent along the *start* event
  //
  // property names should be chosen so as not to clash with properties used by `Task` or `combineEvents`
  data: {
    name: 'my task',
  },

  // if defined, a new detached task is created
  //
  // if not defined and created inside an existing task, the new task is considered a subtask
  onProgress(event) {
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    }
  },
})

// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
// - aborted
task.status

// Triggers the abort signal associated with the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)

// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise is `undefined`
Task.abortSignal

// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)

// sends a warning on the current task if any, otherwise does nothing
Task.warning(message, data)

// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```
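
A minimal sketch of how these pieces compose (the task names and the use of `console.log` as `onProgress` are illustrative):

```js
import { Task } from '@vates/task'

await Task.run({ data: { name: 'backup' }, onProgress: console.log }, async () => {
  Task.set('progress', 0)

  // created without `onProgress` inside a running task, so it becomes a subtask
  await Task.run({ data: { name: 'snapshot' } }, async () => {
    Task.info('snapshot taken')
  })

  Task.set('progress', 100)
})
```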

### `combineEvents`

Creates a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are root tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```
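
For reference, a consolidated `taskLog` built by the implementation below looks roughly like this (field values are illustrative):

```js
{
  id: 'e9pzyhch8kg',
  name: 'my task', // properties from `data` are merged in
  start: 1665416296000,
  properties: { progress: 100 },
  infos: [{ message: 'snapshot taken', data: undefined }],
  tasks: [/* subtask logs with the same shape */],
  status: 'success',
  end: 1665416296500,
  result: 42,
}
```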
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,145 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/task

[](https://npmjs.org/package/@vates/task)  [](https://bundlephobia.com/result?p=@vates/task) [](https://npmjs.org/package/@vates/task)

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/task):

```sh
npm install --save @vates/task
```

## Usage

```js
import { Task } from '@vates/task'

const task = new Task({
  // data in this object will be sent along the *start* event
  //
  // property names should be chosen so as not to clash with properties used by `Task` or `combineEvents`
  data: {
    name: 'my task',
  },

  // if defined, a new detached task is created
  //
  // if not defined and created inside an existing task, the new task is considered a subtask
  onProgress(event) {
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    }
  },
})

// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
// - aborted
task.status

// Triggers the abort signal associated with the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)

// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise is `undefined`
Task.abortSignal

// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)

// sends a warning on the current task if any, otherwise does nothing
Task.warning(message, data)

// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```

### `combineEvents`

Creates a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are root tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,77 +0,0 @@
'use strict'

const assert = require('node:assert').strict

const noop = Function.prototype

function omit(source, keys, target = { __proto__: null }) {
  for (const key of Object.keys(source)) {
    if (!keys.has(key)) {
      target[key] = source[key]
    }
  }
  return target
}

const IGNORED_START_PROPS = new Set([
  'end',
  'infos',
  'properties',
  'result',
  'status',
  'tasks',
  'timestamp',
  'type',
  'warnings',
])

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = omit(event, IGNORED_START_PROPS)
      taskLog.start = event.timestamp
      taskLog.status = 'pending'
      taskLogs.set(id, taskLog)

      const { parentId } = event
      if (parentId === undefined) {
        Object.defineProperty(taskLog, '$root', { value: taskLog })

        // start of a root task
        onRootTaskStart(taskLog)
      } else {
        // start of a subtask
        const parent = taskLogs.get(parentId)
        assert.notEqual(parent, undefined)

        // inject a (non-enumerable) reference to the parent and the root task
        Object.defineProperties(taskLog, { $parent: { value: parent }, $root: { value: parent.$root } })
        ;(parent.tasks ?? (parent.tasks = [])).push(taskLog)
      }
    } else {
      taskLog = taskLogs.get(id)
      assert.notEqual(taskLog, undefined)

      if (type === 'info' || type === 'warning') {
        const key = type + 's'
        const { data, message } = event
        ;(taskLog[key] ?? (taskLog[key] = [])).push({ data, message })
      } else if (type === 'property') {
        ;(taskLog.properties ?? (taskLog.properties = { __proto__: null }))[event.name] = event.value
      } else if (type === 'end') {
        taskLog.end = event.timestamp
        taskLog.result = event.result
        taskLog.status = event.status
      }

      if (type === 'end' && taskLog.$root === taskLog) {
        onRootTaskEnd(taskLog)
      }
    }

    onTaskUpdate(taskLog)
  }
}
@@ -1,182 +0,0 @@
'use strict'

const assert = require('node:assert').strict
const { AsyncLocalStorage } = require('node:async_hooks')

// define a read-only, non-enumerable, non-configurable property
function define(object, property, value) {
  Object.defineProperty(object, property, { value })
}

const noop = Function.prototype

const ABORTED = 'aborted'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { ABORTED, FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
const asyncStorage = global[asyncStorageKey] ?? (global[asyncStorageKey] = new AsyncLocalStorage())

const getTask = () => asyncStorage.getStore()

exports.Task = class Task {
  static get abortSignal() {
    const task = getTask()
    if (task !== undefined) {
      return task.#abortController.signal
    }
  }

  static info(message, data) {
    const task = getTask()
    if (task !== undefined) {
      task.#emit('info', { data, message })
    }
  }

  static run(opts, fn) {
    return new this(opts).run(fn)
  }

  static set(name, value) {
    const task = getTask()
    if (task !== undefined) {
      task.#emit('property', { name, value })
    }
  }

  static warning(message, data) {
    const task = getTask()
    if (task !== undefined) {
      task.#emit('warning', { data, message })
    }
  }

  static wrap(opts, fn) {
    // compatibility with @decorateWith
    if (typeof fn !== 'function') {
      ;[fn, opts] = [opts, fn]
    }

    return function taskRun() {
      return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
    }
  }

  #abortController = new AbortController()
  #onProgress

get id() {
|
||||
return (this.id = Math.random().toString(36).slice(2))
|
||||
}
|
||||
set id(value) {
|
||||
define(this, 'id', value)
|
||||
}
|
||||
|
||||
#startData
|
||||
|
||||
#status = PENDING
|
||||
get status() {
|
||||
return this.#status
|
||||
}
|
||||
|
||||
constructor({ data = {}, onProgress } = {}) {
|
||||
this.#startData = data
|
||||
|
||||
if (onProgress !== undefined) {
|
||||
this.#onProgress = onProgress
|
||||
} else {
|
||||
const parent = getTask()
|
||||
if (parent !== undefined) {
|
||||
const { signal } = parent.#abortController
|
||||
signal.addEventListener('abort', () => {
|
||||
this.#abortController.abort(signal.reason)
|
||||
})
|
||||
|
||||
this.#onProgress = parent.#onProgress
|
||||
this.#startData.parentId = parent.id
|
||||
} else {
|
||||
this.#onProgress = noop
|
||||
}
|
||||
}
|
||||
|
||||
const { signal } = this.#abortController
|
||||
signal.addEventListener('abort', () => {
|
||||
if (this.status === PENDING && !this.#running) {
|
||||
this.#maybeStart()
|
||||
|
||||
const status = ABORTED
|
||||
this.#status = status
|
||||
this.#emit('end', { result: signal.reason, status })
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
abort(reason) {
|
||||
this.#abortController.abort(reason)
|
||||
}
|
||||
|
||||
#emit(type, data) {
|
||||
data.id = this.id
|
||||
data.timestamp = Date.now()
|
||||
data.type = type
|
||||
this.#onProgress(data)
|
||||
}
|
||||
|
||||
#maybeStart() {
|
||||
const startData = this.#startData
|
||||
if (startData !== undefined) {
|
||||
this.#startData = undefined
|
||||
this.#emit('start', startData)
|
||||
}
|
||||
}
|
||||
|
||||
async run(fn) {
|
||||
const result = await this.runInside(fn)
|
||||
if (this.status === PENDING) {
|
||||
this.#status = SUCCESS
|
||||
this.#emit('end', { status: SUCCESS, result })
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
#running = false
|
||||
async runInside(fn) {
|
||||
assert.equal(this.status, PENDING)
|
||||
assert.equal(this.#running, false)
|
||||
this.#running = true
|
||||
|
||||
this.#maybeStart()
|
||||
|
||||
try {
|
||||
const result = await asyncStorage.run(this, fn)
|
||||
this.#running = false
|
||||
return result
|
||||
} catch (result) {
|
||||
const { signal } = this.#abortController
|
||||
const aborted = signal.aborted && result === signal.reason
|
||||
const status = aborted ? ABORTED : FAILURE
|
||||
|
||||
this.#status = status
|
||||
this.#emit('end', { status, result })
|
||||
throw result
|
||||
}
|
||||
}
|
||||
|
||||
wrap(fn) {
|
||||
const task = this
|
||||
return function taskRun() {
|
||||
return task.run(() => fn.apply(this, arguments))
|
||||
}
|
||||
}
|
||||
|
||||
wrapInside(fn) {
|
||||
const task = this
|
||||
return function taskRunInside() {
|
||||
return task.runInside(() => fn.apply(this, arguments))
|
||||
}
|
||||
}
|
||||
}
|
||||
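A short usage sketch of the class above; the abort reason propagates through `AsyncLocalStorage` to `Task.abortSignal` inside the running function (the surrounding code is illustrative):

```js
'use strict'

const { Task } = require('@vates/task')

const task = new Task({ data: { name: 'long job' } })

// somewhere else, e.g. on user request
setTimeout(() => task.abort(new Error('user cancelled')), 1000)

task
  .run(async () => {
    for (;;) {
      // cooperative cancellation: throws signal.reason once aborted
      Task.abortSignal.throwIfAborted()
      await new Promise(resolve => setTimeout(resolve, 100))
    }
  })
  .catch(error => console.error(task.status, error)) // 'aborted'
```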
@@ -1,341 +0,0 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { Task } = require('./index.js')

const noop = Function.prototype

function assertEvent(task, expected, eventIndex = -1) {
  const logs = task.$events
  const actual = logs[eventIndex < 0 ? logs.length + eventIndex : eventIndex]

  assert.equal(typeof actual, 'object')
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const keys of Object.keys(expected)) {
    assert.equal(actual[keys], expected[keys])
  }
}

// like new Task() but with a custom onProgress which adds event to task.$events
function createTask(opts) {
  const events = []
  const task = new Task({ ...opts, onProgress: events.push.bind(events) })
  task.$events = events
  return task
}

describe('Task', function () {
  describe('constructor', function () {
    it('data properties are passed to the start event', async function () {
      const data = { foo: 0, bar: 1 }
      const task = createTask({ data })
      await task.run(noop)
      assertEvent(task, { ...data, type: 'start' }, 0)
    })
  })

  it('subtasks events are passed to root task', async function () {
    const task = createTask()
    const result = {}

    await task.run(async () => {
      await new Task().run(() => result)
    })

    assert.equal(task.$events.length, 4)
    assertEvent(task, { type: 'start', parentId: task.id }, 1)
    assertEvent(task, { type: 'end', status: 'success', result }, 2)
  })

  describe('.abortSignal', function () {
    it('is undefined when run outside a task', function () {
      assert.equal(Task.abortSignal, undefined)
    })

    it('is the current abort signal when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        const { abortSignal } = Task
        assert.equal(abortSignal.aborted, false)
        task.abort()
        assert.equal(abortSignal.aborted, true)
      })
    })
  })

  describe('.abort()', function () {
    it('aborts if the task fails with the abort reason', async function () {
      const task = createTask()
      const reason = {}

      await task
        .run(() => {
          task.abort(reason)

          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)

      assert.equal(task.status, 'aborted')

      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const result = new Error()

      await task
        .run(() => {
          task.abort({})

          throw result
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'failure', result }, 1)
    })

    it('does not abort if the task succeeds', async function () {
      const task = createTask()
      const result = {}

      await task
        .run(() => {
          task.abort({})

          return result
        })
        .catch(noop)

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'success', result }, 1)
    })

    it('aborts before task is running', function () {
      const task = createTask()
      const reason = {}

      task.abort(reason)

      assert.equal(task.status, 'aborted')

      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
    })
  })

  describe('.info()', function () {
    it('does nothing when run outside a task', function () {
      Task.info('foo')
    })

    it('emits an info message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.info('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'info',
        })
      })
    })
  })

  describe('.set()', function () {
    it('does nothing when run outside a task', function () {
      Task.set('progress', 10)
    })

    it('emits a property event when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.set('progress', 10)
        assertEvent(task, {
          name: 'progress',
          type: 'property',
          value: 10,
        })
      })
    })
  })

  describe('.warning()', function () {
    it('does nothing when run outside a task', function () {
      Task.warning('foo')
    })

    it('emits a warning message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.warning('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'warning',
        })
      })
    })
  })

  describe('#id', function () {
    it('can be set', function () {
      const task = createTask()
      task.id = 'foo'
      assert.equal(task.id, 'foo')
    })

    it('cannot be set more than once', function () {
      const task = createTask()
      task.id = 'foo'

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })

    it('is randomly generated if not set', function () {
      assert.notEqual(createTask().id, createTask().id)
    })

    it('cannot be set after being observed', function () {
      const task = createTask()
      noop(task.id)

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })
  })

  describe('#status', function () {
    it('starts as pending', function () {
      assert.equal(createTask().status, 'pending')
    })

    it('changes to success when it finishes without error', async function () {
      const task = createTask()
      await task.run(noop)
      assert.equal(task.status, 'success')
    })

    it('changes to failure when it finishes with error', async function () {
      const task = createTask()
      await task
        .run(() => {
          throw Error()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
          task.abort()
          assert.equal(task.status, 'pending')
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'aborted')
    })

    it('changes to aborted if aborted when not running', async function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'aborted')
    })
  })

  function makeRunTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('finishes the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'success')
      assertEvent(task, {
        status: 'success',
        result: 'foo',
        type: 'end',
      })
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.run', function () {
    makeRunTests((task, fn) => task.run(fn))
  })
  describe('.wrap', function () {
    makeRunTests((task, fn) => task.wrap(fn)())
  })

  function makeRunInsideTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('does not finish the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'pending')
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.runInside', function () {
    makeRunInsideTests((task, fn) => task.runInside(fn))
  })
  describe('.wrapInside', function () {
    makeRunInsideTests((task, fn) => task.wrapInside(fn)())
  })
})
@@ -1,31 +0,0 @@
{
  "private": false,
  "name": "@vates/task",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/task",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/task",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.1",
  "engines": {
    "node": ">=14"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "exports": {
    ".": "./index.js",
    "./combineEvents": "./combineEvents.js"
  }
}
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):

```sh
npm install --save @vates/toggle-scripts
```
> npm install --save @vates/toggle-scripts
```

## Usage

@@ -30,7 +30,6 @@ if (args.length === 0) {

${name} v${version}
`)
// eslint-disable-next-line n/no-process-exit
process.exit()
}

@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async-map):

```sh
npm install --save @xen-orchestra/async-map
```
> npm install --save @xen-orchestra/async-map
```

## Usage

@@ -1,8 +1,6 @@
'use strict'

const { describe, it } = require('test')
const assert = require('assert').strict
const sinon = require('sinon')
/* eslint-env jest */

const { asyncMapSettled } = require('./')

@@ -11,29 +9,26 @@ const noop = Function.prototype
describe('asyncMapSettled', () => {
  it('works', async () => {
    const values = [Math.random(), Math.random()]
    const spy = sinon.spy(async v => v * 2)
    const spy = jest.fn(async v => v * 2)
    const iterable = new Set(values)

    // returns an array containing the result of each call
    assert.deepStrictEqual(
      await asyncMapSettled(iterable, spy),
      values.map(value => value * 2)
    )
    expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))

    for (let i = 0, n = values.length; i < n; ++i) {
      // each call receives the current item as sole argument
      assert.deepStrictEqual(spy.args[i], [values[i]])
      expect(spy.mock.calls[i]).toEqual([values[i]])

      // each call has this bound to the iterable
      assert.deepStrictEqual(spy.thisValues[i], iterable)
      expect(spy.mock.instances[i]).toBe(iterable)
    }
  })

  it('can use a specified thisArg', () => {
    const thisArg = {}
    const spy = sinon.spy()
    const spy = jest.fn()
    asyncMapSettled(['foo'], spy, thisArg)
    assert.deepStrictEqual(spy.thisValues[0], thisArg)
    expect(spy.mock.instances[0]).toBe(thisArg)
  })

  it('rejects only when all calls are resolved', async () => {
@@ -60,22 +55,19 @@ describe('asyncMapSettled', () => {
    // wait for all microtasks to settle
    await new Promise(resolve => setImmediate(resolve))

    assert.strictEqual(hasSettled, false)
    expect(hasSettled).toBe(false)

    defers[1].resolve()

    // wait for all microtasks to settle
    await new Promise(resolve => setImmediate(resolve))

    assert.strictEqual(hasSettled, true)
    await assert.rejects(promise, error)
    expect(hasSettled).toBe(true)
    await expect(promise).rejects.toBe(error)
  })

  it('issues when latest promise rejects', async () => {
    const error = new Error()
    await assert.rejects(
      asyncMapSettled([1], () => Promise.reject(error)),
      error
    )
    await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
  })
})
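The conversion above swaps Jest primitives for `sinon` spies plus `node:assert`; a rough sketch of the mapping applied throughout (the Jest originals are shown as comments, the values are illustrative):

```js
'use strict'

const assert = require('assert').strict
const sinon = require('sinon')

async function demo() {
  const spy = sinon.spy() // was: jest.fn()
  spy.call({ self: true }, 'arg')

  // was: expect(spy.mock.calls[0]).toEqual(['arg'])
  assert.deepStrictEqual(spy.args[0], ['arg'])

  // was: expect(spy.mock.instances[0]).toBe(thisArg)
  assert.deepStrictEqual(spy.thisValues[0], { self: true })

  // was: await expect(promise).rejects.toBe(error)
  const error = new Error()
  await assert.rejects(Promise.reject(error), err => err === error)
}

demo().catch(console.error)
```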
@@ -31,11 +31,6 @@
    "lodash": "^4.17.4"
  },
  "scripts": {
    "postversion": "npm publish",
    "test": "node--test"
  },
  "devDependencies": {
    "sinon": "^15.0.1",
    "test": "^3.2.1"
    "postversion": "npm publish"
  }
}

@@ -8,8 +8,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/audit-core):

```sh
npm install --save @xen-orchestra/audit-core
```
> npm install --save @xen-orchestra/audit-core
```

## Contributions

@@ -7,7 +7,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.2.3",
  "version": "0.2.0",
  "engines": {
    "node": ">=14"
  },
@@ -17,7 +17,7 @@
  },
  "dependencies": {
    "@vates/decorate-with": "^2.0.0",
    "@xen-orchestra/log": "^0.6.0",
    "@xen-orchestra/log": "^0.3.0",
    "golike-defer": "^0.5.1",
    "object-hash": "^2.0.1"
  },

@@ -5,6 +5,7 @@ const PRESETS_RE = /^@babel\/preset-.+$/

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const configs = {
  '@babel/plugin-proposal-decorators': {
@@ -14,7 +15,7 @@ const configs = {
    proposal: 'minimal',
  },
  '@babel/preset-env': {
    debug: __PROD__,
    debug: !__TEST__,

    // disabled until https://github.com/babel/babel/issues/8323 is resolved
    // loose: true,

@@ -8,8 +8,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups-cli):

```sh
npm install --global @xen-orchestra/backups-cli
```
> npm install --global @xen-orchestra/backups-cli
```

## Usage

@@ -1,10 +1,11 @@
import { readFileSync } from 'fs'
import getopts from 'getopts'
'use strict'

const { version } = JSON.parse(readFileSync(new URL('package.json', import.meta.url)))
const getopts = require('getopts')

export function composeCommands(commands) {
  return async function (args, prefix) {
const { version } = require('./package.json')

module.exports = commands =>
  async function (args, prefix) {
    const opts = getopts(args, {
      alias: {
        help: 'h',
@@ -29,6 +30,5 @@ xo-backups v${version}
      return
    }

    return (await command.default)(args.slice(1), prefix + ' ' + commandName)
    return command.main(args.slice(1), prefix + ' ' + commandName)
  }
}
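With this change a command module exports a single async `main` function, looked up lazily through a getter so it is only `require`d when invoked. A hypothetical `hello` command wired into the composer (all names are illustrative, only the `main`/`usage` shape comes from the diff):

```js
'use strict'

// commands/hello.js — a hypothetical command module
module.exports = async function main(args, prefix) {
  console.log(prefix, 'hello', args)
}

// index.js — registering it; require() only runs when the command is used
require('./_composeCommands')({
  hello: {
    get main() {
      return require('./commands/hello')
    },
    usage: '<name>',
  },
})(process.argv.slice(2), 'xo-backups').catch(console.error)
```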
@@ -1,9 +1,11 @@
import fs from 'fs/promises'
import { dirname } from 'path'
'use strict'

export * from 'fs/promises'
const { dirname } = require('path')

export const getSize = path =>
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs

fs.getSize = path =>
  fs.stat(path).then(
    _ => _.size,
    error => {
@@ -14,7 +16,7 @@ export const getSize = path =>
    }
  )

export async function mktree(path) {
fs.mktree = async function mkdirp(path) {
  try {
    await fs.mkdir(path)
  } catch (error) {
@@ -24,8 +26,8 @@ export async function mktree(path) {
      return
    }
    if (code === 'ENOENT') {
      await mktree(dirname(path))
      return mktree(path)
      await mkdirp(dirname(path))
      return mkdirp(path)
    }
    throw error
  }
@@ -35,7 +37,7 @@ export async function mktree(path) {
// - single param for direct use in `Array#map`
// - files are prefixed with directory path
// - safer: returns empty array if path is missing or not a directory
export const readdir2 = path =>
fs.readdir2 = path =>
  fs.readdir(path).then(
    entries => {
      entries.forEach((entry, i) => {
@@ -57,7 +59,7 @@ export const readdir2 = path =>
    }
  )

export async function symlink2(target, path) {
fs.symlink2 = async (target, path) => {
  try {
    await fs.symlink(target, path)
  } catch (error) {
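A usage sketch of the promisified helpers defined above, assuming the module is required as `./_fs` like elsewhere in this package; note that the exact error handling of `getSize` and `readdir2` sits in parts of the file these hunks elide:

```js
'use strict'

const fs = require('./_fs')

async function main() {
  // recursively create a directory, like mkdir -p
  await fs.mktree('/tmp/xo-backups-example/a/b')

  // entries come back prefixed with the directory path, and per the
  // comment above a missing path yields an empty array instead of throwing
  const entries = await fs.readdir2('/tmp/xo-backups-example/a')
  console.log(entries) // [ '/tmp/xo-backups-example/a/b' ]

  // size of a file; the behavior for missing files is defined by the
  // error branch elided from this hunk
  console.log(await fs.getSize('/tmp/xo-backups-example/a/b'))
}

main().catch(console.error)
```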
40
@xen-orchestra/backups-cli/commands/clean-vms.js
Normal file
@@ -0,0 +1,40 @@
'use strict'

// -----------------------------------------------------------------------------

const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const getopts = require('getopts')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
const { resolve } = require('path')

const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))

module.exports = async function main(args) {
  const { _, fix, remove, merge } = getopts(args, {
    alias: {
      fix: 'f',
      remove: 'r',
      merge: 'm',
    },
    boolean: ['fix', 'merge', 'remove'],
    default: {
      merge: false,
      remove: false,
    },
  })

  await asyncMap(_, async vmDir => {
    vmDir = resolve(vmDir)
    try {
      await adapter.cleanVm(vmDir, {
        fixMetadata: fix,
        remove,
        merge,
        logInfo: (...args) => console.log(...args),
        logWarn: (...args) => console.warn(...args),
      })
    } catch (error) {
      console.error('adapter.cleanVm', vmDir, error)
    }
  })
}
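A sketch of driving the new command module directly, bypassing the `xo-backups` binary (the backup directory path is illustrative):

```js
'use strict'

// equivalent to: xo-backups clean-vms --remove --merge xo-vm-backups/<uuid>
const cleanVms = require('./commands/clean-vms')

cleanVms(['--remove', '--merge', '/path/to/xo-vm-backups/example-vm'])
  .then(() => console.log('done'))
  .catch(console.error)
```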
@@ -1,38 +0,0 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'
import Disposable from 'promise-toolbox/Disposable'
import { pathToFileURL } from 'url'

export default async function cleanVms(args) {
  const { _, fix, remove, merge } = getopts(args, {
    alias: {
      fix: 'f',
      remove: 'r',
      merge: 'm',
    },
    boolean: ['fix', 'merge', 'remove'],
    default: {
      merge: false,
      remove: false,
    },
  })

  await asyncMap(_, vmDir =>
    Disposable.use(getSyncedHandler({ url: pathToFileURL(dirname(vmDir)).href }), async handler => {
      try {
        await new RemoteAdapter(handler).cleanVm(basename(vmDir), {
          fixMetadata: fix,
          remove,
          merge,
          logInfo: (...args) => console.log(...args),
          logWarn: (...args) => console.warn(...args),
        })
      } catch (error) {
        console.error('adapter.cleanVm', vmDir, error)
      }
    })
  )
}
@@ -1,10 +1,13 @@
import { mktree, readdir2, readFile, symlink2 } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import filenamify from 'filenamify'
import get from 'lodash/get.js'
import { dirname, join, relative } from 'path'
'use strict'

export default async function createSymlinkIndex([backupDir, fieldPath]) {
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')

const { mktree, readdir2, readFile, symlink2 } = require('../_fs')

module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
  const indexDir = join(backupDir, 'indexes', filenamify(fieldPath))
  await mktree(indexDir)

@@ -1,13 +1,16 @@
import { readdir2, readFile, getSize } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import { createHash } from 'crypto'
import groupBy from 'lodash/groupBy.js'
import { dirname, resolve } from 'path'
'use strict'

const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')

const { readdir2, readFile, getSize } = require('../_fs')

const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)

export default async function info(vmDirs) {
module.exports = async function info(vmDirs) {
  const jsonFiles = (
    await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
  ).flat()
@@ -1,12 +1,11 @@
#!/usr/bin/env node
import { composeCommands } from './_composeCommands.mjs'

const importDefault = async path => (await import(path)).default
'use strict'

composeCommands({
require('./_composeCommands')({
  'clean-vms': {
    get default() {
      return importDefault('./commands/clean-vms.mjs')
    get main() {
      return require('./commands/clean-vms')
    },
    usage: `[--fix] [--merge] [--remove] xo-vm-backups/*

@@ -19,14 +18,14 @@ composeCommands({
`,
  },
  'create-symlink-index': {
    get default() {
      return importDefault('./commands/create-symlink-index.mjs')
    get main() {
      return require('./commands/create-symlink-index')
    },
    usage: 'xo-vm-backups <field path>',
  },
  info: {
    get default() {
      return importDefault('./commands/info.mjs')
    get main() {
      return require('./commands/info')
    },
    usage: 'xo-vm-backups/*',
  },
@@ -1,21 +1,21 @@
{
  "private": false,
  "bin": {
    "xo-backups": "index.mjs"
    "xo-backups": "index.js"
  },
  "preferGlobal": true,
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.36.0",
    "@xen-orchestra/fs": "^3.3.4",
    "@xen-orchestra/backups": "^0.27.4",
    "@xen-orchestra/fs": "^3.1.0",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.21.0"
  },
  "engines": {
    "node": ">=14"
    "node": ">=7.10.1"
  },
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
  "name": "@xen-orchestra/backups-cli",
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "1.0.6",
  "version": "0.7.7",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",

@@ -3,7 +3,6 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

@@ -12,7 +11,6 @@ const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const createStreamThrottle = require('./_createStreamThrottle.js')

const noop = Function.prototype

@@ -27,7 +25,6 @@ const getAdaptersByRemote = adapters => {
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

const DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

@@ -41,15 +38,12 @@ const DEFAULT_VM_SETTINGS = {
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  maxMergedDeltasPerRun: 2,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  validateVhdStreams: false,
  vmTimeout: 0,
}

@@ -58,13 +52,6 @@ const DEFAULT_METADATA_SETTINGS = {
  retentionXoMetadata: 0,
}

class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}

exports.Backup = class Backup {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
@@ -72,6 +59,13 @@ exports.Backup = class Backup {
    this._job = job
    this._schedule = schedule

    this._getAdapter = Disposable.factory(function* (remoteId) {
      return {
        adapter: yield getAdapter(remoteId),
        remoteId,
      }
    })

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
@@ -92,27 +86,6 @@ exports.Backup = class Backup {

    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }

    const { getRemoteTimeout } = this._settings
    this._getAdapter = async function (remoteId) {
      try {
        const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))

        return new Disposable(() => disposable.dispose(), {
          adapter: disposable.value,
          remoteId,
        })
      } catch (error) {
        // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
        runTask(
          {
            name: 'get remote adapter',
            data: { type: 'remote', id: remoteId },
          },
          () => Promise.reject(error)
        )
      }
    }
  }

  async _runMetadataBackup() {
@@ -158,7 +131,20 @@ exports.Backup = class Backup {
          })
        )
      ),
      Disposable.all(remoteIds.map(id => this._getAdapter(id))),
      Disposable.all(
        remoteIds.map(id =>
          this._getAdapter(id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
@@ -229,11 +215,9 @@ exports.Backup = class Backup {
    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    const settings = this._settings
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
@@ -248,7 +232,19 @@ exports.Backup = class Backup {
          })
        )
      ),
      Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id =>
          this._getAdapter(id).catch(error => {
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
@@ -270,35 +266,23 @@ exports.Backup = class Backup {
        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          return this._getRecord('VM', vmUuid).then(
            disposableVm =>
              Disposable.use(disposableVm, vm => {
                taskStart.data.name_label = vm.name_label
                return runTask(taskStart, () =>
                  new VmBackup({
                    baseSettings,
                    config,
                    getSnapshotNameLabel,
                    healthCheckSr,
                    job,
                    remoteAdapters,
                    schedule,
                    settings: { ...settings, ...allSettings[vm.uuid] },
                    srs,
                    throttleStream,
                    vm,
                  }).run()
                )
              }),
            error =>
              runTask(taskStart, () => {
                throw error
              })
        const handleVm = vmUuid =>
          runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
            Disposable.use(this._getRecord('VM', vmUuid), vm =>
              new VmBackup({
                baseSettings,
                config,
                getSnapshotNameLabel,
                healthCheckSr,
                job,
                remoteAdapters,
                schedule,
                settings: { ...settings, ...allSettings[vm.uuid] },
                srs,
                vm,
              }).run()
            )
          )
        }
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }

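The final hunk keeps the existing scheduling rule: `concurrency === 0` means unlimited, otherwise `handleVm` is wrapped by `limit-concurrency-decorator`. A standalone sketch of that pattern (the VM handler body is illustrative):

```js
'use strict'

const { asyncMapSettled } = require('@xen-orchestra/async-map')
const { limitConcurrency } = require('limit-concurrency-decorator')

async function handleVm(vmUuid) {
  // placeholder for the real per-VM backup
  console.log('backing up', vmUuid)
}

async function runAll(vmIds, concurrency) {
  // 0 disables the limit, any other value caps concurrent runs
  const fn = concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm)
  await asyncMapSettled(vmIds, fn)
}

runAll(['vm-1', 'vm-2', 'vm-3'], 2).catch(console.error)
```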
@@ -3,14 +3,12 @@
const { Task } = require('./Task')

exports.HealthCheckVmBackup = class HealthCheckVmBackup {
  #restoredVm
  #timeout
  #xapi
  #restoredVm

  constructor({ restoredVm, timeout = 10 * 60 * 1000, xapi }) {
  constructor({ restoredVm, xapi }) {
    this.#restoredVm = restoredVm
    this.#xapi = xapi
    this.#timeout = timeout
  }

  async run() {
@@ -25,12 +23,7 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {

        // remove vifs
        await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
        const waitForScript = restoredVm.tags.includes('xo-backup-health-check-xenstore')
        if (waitForScript) {
          await restoredVm.set_xenstore_data({
            'vm-data/xo-backup-health-check': 'planned',
          })
        }

        const start = new Date()
        // start Vm

@@ -41,7 +34,7 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
          false // Skip pre-boot checks?
        )
        const started = new Date()
        const timeout = this.#timeout
        const timeout = 10 * 60 * 1000
        const startDuration = started - start

        let remainingTimeout = timeout - startDuration

@@ -59,52 +52,12 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
        remainingTimeout -= running - started

        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} second`)
          throw new Error(`local xapi did not get Runnig state for VM ${restoredId} after ${timeout / 1000} second`)
        }
        // wait for the guest tool version to be defined
        await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
          timeout: remainingTimeout,
        })

        const guestToolsReady = new Date()
        remainingTimeout -= guestToolsReady - running
        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get the guest tools check ${restoredId} after ${timeout / 1000} second`)
        }

        if (waitForScript) {
          const startedRestoredVm = await xapi.waitObjectState(
            restoredVm.$ref,
            vm =>
              vm?.xenstore_data !== undefined &&
              (vm.xenstore_data['vm-data/xo-backup-health-check'] === 'success' ||
                vm.xenstore_data['vm-data/xo-backup-health-check'] === 'failure'),
            {
              timeout: remainingTimeout,
            }
          )
          const scriptOk = new Date()
          remainingTimeout -= scriptOk - guestToolsReady
          if (remainingTimeout < 0) {
            throw new Error(
              `Backup health check script did not update vm-data/xo-backup-health-check of ${restoredId} after ${
                timeout / 1000
              } second, got ${
                startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check']
              } instead of 'success' or 'failure'`
            )
          }

          if (startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check'] !== 'success') {
            const message = startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check-error']
            if (message) {
              throw new Error(`Backup health check script failed with message ${message} for VM ${restoredId} `)
            } else {
              throw new Error(`Backup health check script failed for VM ${restoredId} `)
            }
          }
          Task.info('Backup health check script successfully executed')
        }
      }
    )
  }

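These hunks make the timeout injectable (defaulting to ten minutes) and add the optional xenstore handshake. A hedged construction sketch, assuming a connected `xapi` client and a restored VM record:

```js
'use strict'

const { HealthCheckVmBackup } = require('./HealthCheckVmBackup')

async function healthCheck(xapi, restoredVm) {
  await new HealthCheckVmBackup({
    restoredVm,
    xapi,
    timeout: 5 * 60 * 1000, // override the 10 min default introduced above
  }).run()
  // resolves once the VM reaches Running, the guest tools report a version
  // and, if the VM carries the xo-backup-health-check-xenstore tag, the
  // in-guest script wrote 'success' to vm-data/xo-backup-health-check
}

module.exports = healthCheck
```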
@@ -8,8 +8,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups):

```sh
npm install --save @xen-orchestra/backups
```
> npm install --save @xen-orchestra/backups
```

## Contributions

@@ -28,7 +28,6 @@ const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
@@ -77,16 +76,14 @@ const debounceResourceFactory = factory =>
}

class RemoteAdapter {
  constructor(
    handler,
    { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
  ) {
  constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy=false } = {}) {
    this._debounceResource = debounceResource
    this._dirMode = dirMode
    this._handler = handler
    this._vhdDirectoryCompression = vhdDirectoryCompression
    this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
    this._useGetDiskLegacy = useGetDiskLegacy

  }

  get handler() {
@@ -209,8 +206,8 @@ class RemoteAdapter {

      const isVhdDirectory = vhd instanceof VhdDirectory
      return isVhdDirectory
        ? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
        : !this.useVhdDirectory()
        ? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
        : !this.#useVhdDirectory()
    })
  }

@@ -233,23 +230,21 @@ class RemoteAdapter {
    return promise
  }

  async #removeVmBackupsFromCache(backups) {
    await asyncEach(
      Object.entries(
        groupBy(
          backups.map(_ => _._filename),
          dirname
        )
      ),
      ([dir, filenames]) =>
        // will not reject
        this._updateCache(dir + '/cache.json.gz', backups => {
          for (const filename of filenames) {
            debug('removing cache entry', { entry: filename })
            delete backups[filename]
          }
        })
    )
  #removeVmBackupsFromCache(backups) {
    for (const [dir, filenames] of Object.entries(
      groupBy(
        backups.map(_ => _._filename),
        dirname
      )
    )) {
      // detached async action, will not reject
      this._updateCache(dir + '/cache.json.gz', backups => {
        for (const filename of filenames) {
          debug('removing cache entry', { entry: filename })
          delete backups[filename]
        }
      })
    }
  }

  async deleteDeltaVmBackups(backups) {
@@ -258,7 +253,7 @@ class RemoteAdapter {
    // this will delete the json, unused VHDs will be detected by `cleanVm`
    await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))

    await this.#removeVmBackupsFromCache(backups)
    this.#removeVmBackupsFromCache(backups)
  }

  async deleteMetadataBackup(backupId) {
@@ -287,7 +282,7 @@ class RemoteAdapter {
      Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
    )

    await this.#removeVmBackupsFromCache(backups)
    this.#removeVmBackupsFromCache(backups)
  }

  deleteVmBackup(file) {
@@ -321,15 +316,17 @@ class RemoteAdapter {
    return this._vhdDirectoryCompression
  }

  useVhdDirectory() {
  #useVhdDirectory() {
    return this.handler.useVhdDirectory()
  }

  #useAlias() {
    return this.useVhdDirectory()
    return this.#useVhdDirectory()
  }


  async *#getDiskLegacy(diskId) {

    const RE_VHDI = /^vhdi(\d+)$/
    const handler = this._handler

@@ -361,8 +358,8 @@ class RemoteAdapter {
  }

  async *getDisk(diskId) {
    if (this._useGetDiskLegacy) {
      yield* this.#getDiskLegacy(diskId)
    if(this._useGetDiskLegacy){
      yield * this.#getDiskLegacy(diskId)
      return
    }
    const handler = this._handler
@@ -511,7 +508,7 @@ class RemoteAdapter {
    return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
  }

  async _readCache(path) {
  async #readCache(path) {
    try {
      return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
    } catch (error) {
@@ -524,15 +521,15 @@ class RemoteAdapter {
  _updateCache = synchronized.withKey()(this._updateCache)
  // eslint-disable-next-line no-dupe-class-members
  async _updateCache(path, fn) {
    const cache = await this._readCache(path)
    const cache = await this.#readCache(path)
    if (cache !== undefined) {
      fn(cache)

      await this._writeCache(path, cache)
      await this.#writeCache(path, cache)
    }
  }

  async _writeCache(path, data) {
  async #writeCache(path, data) {
    try {
      await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
    } catch (error) {
@@ -540,6 +537,10 @@ class RemoteAdapter {
    }
  }

  async invalidateVmBackupListCache(vmUuid) {
    await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
  }

  async #getCachabledDataListVmBackups(dir) {
    debug('generating cache', { path: dir })

@@ -580,7 +581,7 @@ class RemoteAdapter {
  async _readCacheListVmBackups(vmUuid) {
    const path = this.#getVmBackupsCache(vmUuid)

    const cache = await this._readCache(path)
    const cache = await this.#readCache(path)
    if (cache !== undefined) {
      debug('found VM backups cache, using it', { path })
      return cache
@@ -593,7 +594,7 @@ class RemoteAdapter {
    }

    // detached async action, will not reject
    this._writeCache(path, backups)
    this.#writeCache(path, backups)

    return backups
  }
@@ -644,7 +645,7 @@ class RemoteAdapter {
    })

    // will not throw
    await this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
    this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
      debug('adding cache entry', { entry: path })
      backups[path] = {
        ...metadata,
@@ -660,9 +661,10 @@ class RemoteAdapter {

  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
    const handler = this._handler
    if (this.useVhdDirectory()) {

    if (this.#useVhdDirectory()) {
      const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
      const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
      await createVhdDirectoryFromStream(handler, dataPath, input, {
        concurrency: writeBlockConcurrency,
        compression: this.#getCompressionType(),
        async validator() {
@@ -671,14 +673,12 @@ class RemoteAdapter {
        },
      })
      await VhdAbstract.createAlias(handler, path, dataPath)
      return size
    } else {
      return this.outputStream(path, input, { checksum, validator })
      await this.outputStream(path, input, { checksum, validator })
    }
  }

  async outputStream(path, input, { checksum = true, validator = noop } = {}) {
    const container = watchStreamSize(input)
    await this._handler.outputStream(path, input, {
      checksum,
      dirMode: this._dirMode,
@@ -687,7 +687,6 @@ class RemoteAdapter {
        return validator.apply(this, arguments)
      },
    })
    return container.size
  }

  // open the hierarchy of ancestors until we find a full one
@@ -719,7 +718,7 @@ class RemoteAdapter {

  async readDeltaVmBackup(metadata, ignoredVdis) {
    const handler = this._handler
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const { vbds, vhds, vifs, vm } = metadata
    const dir = dirname(metadata._filename)
    const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

@@ -734,7 +733,7 @@ class RemoteAdapter {
      vdis,
      version: '1.0.0',
      vifs,
      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
      vm,
    }
  }

@@ -746,49 +745,7 @@ class RemoteAdapter {
    // _filename is a private field used to compute the backup id
    //
    // it's enumerable to make it cacheable
    const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }

    // backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
    if (typeof metadata.vm.is_a_template === 'number') {
      const properties = {
        vbds: ['bootable', 'unpluggable', 'storage_lock', 'empty', 'currently_attached'],
        vdis: [
          'sharable',
          'read_only',
          'storage_lock',
          'managed',
          'missing',
          'is_a_snapshot',
          'allow_caching',
          'metadata_latest',
        ],
        vifs: ['currently_attached', 'MAC_autogenerated'],
        vm: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_a_snapshot', 'is_snapshot_from_vmpp'],
        vmSnapshot: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_snapshot_from_vmpp'],
      }

      function fixBooleans(obj, properties) {
        properties.forEach(property => {
          if (typeof obj[property] === 'number') {
            obj[property] = obj[property] === 1
          }
        })
      }

      for (const [key, propertiesInKey] of Object.entries(properties)) {
        const value = metadata[key]
        if (value !== undefined) {
          // some properties of the metadata are collections indexed by the opaqueRef
          const isCollection = Object.keys(value).some(subKey => subKey.startsWith('OpaqueRef:'))
          if (isCollection) {
            Object.values(value).forEach(subValue => fixBooleans(subValue, propertiesInKey))
          } else {
            fixBooleans(value, propertiesInKey)
          }
        }
      }
    }
    return metadata
    return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
  }
}

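The removed block normalized metadata produced by XenServer < 7.1, where XML-RPC encoded booleans as integers. A tiny standalone sketch of the transformation it performed:

```js
'use strict'

// before: metadata from XenServer < 7.1, booleans encoded as integers
const vm = { is_a_template: 0, is_control_domain: 1, name_label: 'web01' }

// the removed fixBooleans pass rewrote the listed properties in place
for (const property of ['is_a_template', 'is_control_domain']) {
  if (typeof vm[property] === 'number') {
    vm[property] = vm[property] === 1
  }
}

console.log(vm) // { is_a_template: false, is_control_domain: true, name_label: 'web01' }
```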
@@ -100,7 +100,7 @@ class Task {
   * In case of error, the task will be failed.
   *
   * @typedef Result
   * @param {() => Result} fn
   * @param {() => Result)} fn
   * @param {boolean} last - Whether the task should succeed if there is no error
   * @returns Result
   */

@@ -6,13 +6,11 @@ const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { pipeline } = require('node:stream')

const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
@@ -46,8 +44,6 @@ const forkDeltaExport = deltaExport =>
    },
  })

const noop = Function.prototype

class VmBackup {
  constructor({
    config,
@@ -59,7 +55,6 @@ class VmBackup {
    schedule,
    settings,
    srs,
    throttleStream,
    vm,
  }) {
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
@@ -87,7 +82,6 @@ class VmBackup {
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._throttleStream = throttleStream
    this._xapi = vm.$xapi

    // Base VM for the export
@@ -249,19 +243,8 @@ class VmBackup {
    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    // since NBD is network based, if one disk uses NBD, all the disks do
    // except the suspended VDI
    if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
      Task.info('Transfer data using NBD')
    }
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    if (this._settings.validateVhdStreams) {
      deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
    }

    deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

    const timestamp = Date.now()

    await this._callWriters(
@@ -302,12 +285,10 @@ class VmBackup {

  async _copyFull() {
    const { compression } = this.job
    const stream = this._throttleStream(
      await this._xapi.VM_export(this.exportedVm.$ref, {
        compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
        useSnapshot: false,
      })
    )
    const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
      compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
      useSnapshot: false,
    })
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

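`createStreamThrottle` (dropped by this hunk) wrapped each export stream to cap the transfer rate. The sketch below only matches the call shape visible in the diff, `createStreamThrottle(rate)` returning a stream wrapper with `0` meaning unthrottled; the internals are an assumption:

```js
'use strict'

const { Transform } = require('node:stream')

// sketch only: rate in bytes per second, 0 disables throttling (call shape
// matches createStreamThrottle(settings.maxExportRate) in the removed code)
function createStreamThrottle(rate) {
  if (rate === 0) {
    return stream => stream
  }
  return stream => {
    let sent = 0
    const start = Date.now()
    return stream.pipe(
      new Transform({
        transform(chunk, _, done) {
          sent += chunk.length
          // how many seconds ahead of the allowed rate we are
          const ahead = sent / rate - (Date.now() - start) / 1000
          setTimeout(() => done(null, chunk), Math.max(0, ahead * 1000))
        },
      })
    )
  }
}

module.exports = createStreamThrottle
```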
@@ -1,8 +1,8 @@
'use strict'

const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')

require('@xen-orchestra/log/configure').catchGlobalErrors(logger)
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
  require('@xen-orchestra/log').createLogger('xo:backups:worker')
)

require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()

@@ -20,8 +20,6 @@ const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')

const { debug } = logger

class BackupWorker {
  #config
  #job
@@ -124,11 +122,6 @@ decorateMethodsWith(BackupWorker, {
  ]),
})

const emitMessage = message => {
  debug('message emitted', { message })
  process.send(message)
}

// Received message:
//
// Message {
@@ -146,8 +139,6 @@ const emitMessage = message => {
//   result?: any
// }
process.on('message', async message => {
  debug('message received', { message })

  if (message.action === 'run') {
    const backupWorker = new BackupWorker(message.data)
    try {
@@ -156,7 +147,7 @@ process.on('message', async message => {
          {
            name: 'backup run',
            onLog: data =>
              emitMessage({
              process.send({
                data,
                type: 'log',
              }),
@@ -165,13 +156,13 @@ process.on('message', async message => {
        )
        : await backupWorker.run()

      emitMessage({
      process.send({
        type: 'result',
        result,
        status: 'success',
      })
    } catch (error) {
      emitMessage({
      process.send({
        type: 'result',
        result: error,
        status: 'failure',

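The comments above describe the worker protocol. A hedged sketch of the parent side, forking this module and exchanging `run`/`result` messages; the module path and the `runWithLogs` flag are assumptions based on this hunk:

```js
'use strict'

const { fork } = require('node:child_process')

// path is illustrative; the real caller lives in xo-server
const worker = fork(require.resolve('@xen-orchestra/backups/_backupWorker'))

worker.on('message', message => {
  if (message.type === 'log') {
    console.log('task log', message.data)
  } else if (message.type === 'result') {
    console.log(message.status, message.result) // 'success' | 'failure'
    worker.disconnect()
  }
})

// triggers BackupWorker#run() in the child; the data payload would carry
// the config, job and schedule expected by the BackupWorker constructor
worker.send({ action: 'run', data: {}, runWithLogs: true })
```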
@@ -1,7 +1,6 @@
|
||||
'use strict'
|
||||
|
||||
const { beforeEach, afterEach, test, describe } = require('test')
|
||||
const assert = require('assert').strict
|
||||
/* eslint-env jest */
|
||||
|
||||
const rimraf = require('rimraf')
|
||||
const tmp = require('tmp')
|
||||
@@ -15,8 +14,9 @@ const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
|
||||
const { checkAliases } = require('./_cleanVm')
|
||||
const { dirname, basename } = require('path')
|
||||
|
||||
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
|
||||
const rootPath = 'xo-vm-backups/VMUUID/'
|
||||
let tempDir, adapter, handler, jobId, vdiId, basePath
|
||||
|
||||
jest.setTimeout(60000)
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await pFromCallback(cb => tmp.dir(cb))
|
||||
@@ -25,13 +25,12 @@ beforeEach(async () => {
   adapter = new RemoteAdapter(handler)
   jobId = uniqueId()
   vdiId = uniqueId()
-  relativePath = `vdis/${jobId}/${vdiId}`
-  basePath = `${rootPath}/${relativePath}`
+  basePath = `vdis/${jobId}/${vdiId}`
   await fs.mkdirp(`${tempDir}/${basePath}`)
 })

 afterEach(async () => {
-  await rimraf(tempDir)
+  await pFromCallback(cb => rimraf(tempDir, cb))
   await handler.forget()
 })
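`pFromCallback` is what lets the callback-style `tmp` and `rimraf` APIs be awaited here; the newer side of this hunk relied on `rimraf`'s own promise API instead. A minimal usage sketch, assuming the helper is imported from `promise-toolbox` as this test file does:

```js
'use strict'

const { pFromCallback } = require('promise-toolbox')
const tmp = require('tmp')

async function main() {
  // pFromCallback injects a Node-style callback and resolves with its result.
  const tempDir = await pFromCallback(cb => tmp.dir(cb))
  console.log('created', tempDir)
}

main().catch(console.error)
```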
@@ -77,18 +76,18 @@ test('It remove broken vhd', async () => {
   // todo also tests a directory and an alias

   await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
-  assert.equal((await handler.list(basePath)).length, 1)
+  expect((await handler.list(basePath)).length).toEqual(1)
   let loggued = ''
   const logInfo = message => {
     loggued += message
   }
-  await adapter.cleanVm(rootPath, { remove: false, logInfo, logWarn: logInfo, lock: false })
-  assert.equal(loggued, `VHD check error`)
+  await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
+  expect(loggued).toEqual(`VHD check error`)
   // not removed
-  assert.deepEqual(await handler.list(basePath), ['notReallyAVhd.vhd'])
+  expect((await handler.list(basePath)).length).toEqual(1)
   // really remove it
-  await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
-  assert.deepEqual(await handler.list(basePath), [])
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
+  expect((await handler.list(basePath)).length).toEqual(0)
 })

 test('it remove vhd with missing or multiple ancestors', async () => {
@@ -122,10 +121,10 @@ test('it remove vhd with missing or multiple ancestors', async () => {
   const logInfo = message => {
     loggued += message + '\n'
   }
-  await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })

   const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
-  assert.equal(deletedOrphanVhd.length, 1) // only one vhd should have been deleted
+  expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted

   // we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
 })
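All of these tests drive the same entry point, `RemoteAdapter#cleanVm(vmDir, options)`. From the calls visible in this diff, the options are: `remove` (actually delete broken or unused files instead of only reporting them), `merge` (resume or perform pending VHD chain merges), `logInfo`/`logWarn` (message sinks the tests assert on) and `lock` (whether to take a lock on the VM directory). A hedged sketch of the dry-run-then-clean pattern the tests follow; the helper name and flow are illustrative, not part of the library:

```js
'use strict'

// Illustrative only: a dry run first, then a destructive pass, mirroring the tests.
async function checkThenClean(adapter, vmDir) {
  const messages = []
  const logInfo = message => messages.push(message)

  // Dry run: report problems without touching any file.
  await adapter.cleanVm(vmDir, { remove: false, logInfo, logWarn: logInfo, lock: false })

  if (messages.length !== 0) {
    // Destructive pass: remove broken/unused VHDs and merge unfinished chains.
    await adapter.cleanVm(vmDir, { remove: true, merge: true, logInfo, logWarn: logInfo, lock: false })
  }
  return messages
}

module.exports = { checkThenClean }
```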
@@ -133,12 +132,12 @@ test('it remove vhd with missing or multiple ancestors', async () => {
 test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
   // create a metadata file marking child and orphan as ok
   await handler.writeFile(
-    `${rootPath}/metadata.json`,
+    `metadata.json`,
     JSON.stringify({
       mode: 'delta',
       vhds: [
-        `${relativePath}/orphan.vhd`,
-        `${relativePath}/child.vhd`,
+        `${basePath}/orphan.vhd`,
+        `${basePath}/child.vhd`,
         // abandonned.json is not here
       ],
     })
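The fixture above pins down the shape of a delta backup's `metadata.json` as `cleanVm` reads it: a `mode`, an optional expected `size`, and the list of VHD paths the backup references, relative to the VM directory. Roughly as below; the field names are taken from this diff, while the comments are interpretation rather than source:

```js
// Shape of a delta backup metadata.json, as exercised by these tests.
const exampleMetadata = {
  mode: 'delta', // as opposed to a full backup, which references an XVA instead
  size: 209920, // optional: expected total size, which cleanVm warns about and updates after a merge
  vhds: [
    'vdis/<jobId>/<vdiId>/orphan.vhd', // every VHD the backup still needs
    'vdis/<jobId>/<vdiId>/child.vhd',
  ],
}

console.log(JSON.stringify(exampleMetadata, null, 2))
```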
@@ -161,39 +160,39 @@
   const logInfo = message => {
     loggued += message + '\n'
   }
-  await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
   let matched = loggued.match(/deleting unused VHD/g) || []
-  assert.equal(matched.length, 1) // only one vhd should have been deleted
+  expect(matched.length).toEqual(1) // only one vhd should have been deleted

   // a missing vhd causes clean to remove all vhds
   await handler.writeFile(
-    `${rootPath}/metadata.json`,
+    `metadata.json`,
     JSON.stringify({
       mode: 'delta',
       vhds: [
-        `deleted.vhd`, // in metadata but not in vhds
-        `orphan.vhd`,
-        `child.vhd`,
+        `${basePath}/deleted.vhd`, // in metadata but not in vhds
+        `${basePath}/orphan.vhd`,
+        `${basePath}/child.vhd`,
         // abandonned.vhd is not here anymore
       ],
     }),
     { flags: 'w' }
   )
   loggued = ''
-  await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
   matched = loggued.match(/deleting unused VHD/g) || []
-  assert.equal(matched.length, 2) // all vhds (orphan and child) should have been deleted
+  expect(matched.length).toEqual(2) // all vhds (orphan and child) should have been deleted
 })

 test('it merges delta of non destroyed chain', async () => {
   await handler.writeFile(
-    `${rootPath}/metadata.json`,
+    `metadata.json`,
     JSON.stringify({
       mode: 'delta',
       size: 12000, // a size too small
       vhds: [
-        `${relativePath}/grandchild.vhd`, // grand child should not be merged
-        `${relativePath}/child.vhd`,
+        `${basePath}/grandchild.vhd`, // grand child should not be merged
+        `${basePath}/child.vhd`,
        // orphan is not here, it should be merged in child
       ],
     })
@@ -220,33 +219,33 @@ test('it merges delta of non destroyed chain', async () => {
   const logInfo = message => {
     loggued.push(message)
   }
-  await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
-  assert.equal(loggued[0], `unexpected number of entries in backup cache`)
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
+  expect(loggued[0]).toEqual(`incorrect backup size in metadata`)

   loggued = []
-  await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
+  await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
   const [merging] = loggued
-  assert.equal(merging, `merging VHD chain`)
+  expect(merging).toEqual(`merging VHD chain`)

-  const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
+  const metadata = JSON.parse(await handler.readFile(`metadata.json`))
   // size should be the size of children + grand children after the merge
-  assert.equal(metadata.size, 209920)
+  expect(metadata.size).toEqual(209920)

   // merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
   // only check deletion
   const remainingVhds = await handler.list(basePath)
-  assert.equal(remainingVhds.length, 2)
-  assert.equal(remainingVhds.includes('child.vhd'), true)
-  assert.equal(remainingVhds.includes('grandchild.vhd'), true)
+  expect(remainingVhds.length).toEqual(2)
+  expect(remainingVhds.includes('child.vhd')).toEqual(true)
+  expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
 })

 test('it finish unterminated merge ', async () => {
   await handler.writeFile(
-    `${rootPath}/metadata.json`,
+    `metadata.json`,
     JSON.stringify({
       mode: 'delta',
       size: 209920,
-      vhds: [`${relativePath}/orphan.vhd`, `${relativePath}/child.vhd`],
+      vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
     })
   )
@@ -272,13 +271,13 @@ test('it finish unterminated merge ', async () => {
     })
   )

-  await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
+  await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
   // merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)

   // only check deletion
   const remainingVhds = await handler.list(basePath)
-  assert.equal(remainingVhds.length, 1)
-  assert.equal(remainingVhds.includes('child.vhd'), true)
+  expect(remainingVhds.length).toEqual(1)
+  expect(remainingVhds.includes('child.vhd')).toEqual(true)
 })

 // each VHD can be a file, a directory, an alias to a file or an alias to a directory
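On a remote, a VHD can therefore be stored four ways. An alias is a tiny `*.alias.vhd` file whose content is the relative path of the real data, which is what lets the cleaner rename and merge chains without moving a whole block directory (the "do not move a full folder" goal stated further down). A sketch using `vhd-lib`'s `VhdAbstract.createAlias`, as these tests do; the handler setup and paths are illustrative:

```js
'use strict'

const { VhdAbstract } = require('vhd-lib')

// `handler` is an @xen-orchestra/fs remote handler, as in the tests above.
async function makeAlias(handler) {
  // Writes vhds/disk.alias.vhd, whose content points at the actual VHD data,
  // so renaming or re-parenting the disk only ever touches this small file.
  await VhdAbstract.createAlias(handler, 'vhds/disk.alias.vhd', 'vhds/data/disk.vhd')
}

module.exports = { makeAlias }
```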
@@ -368,34 +367,22 @@ describe('tests multiple combination ', () => {
       // the metadata file
       await handler.writeFile(
-        `${rootPath}/metadata.json`,
+        `metadata.json`,
         JSON.stringify({
           mode: 'delta',
           vhds: [
-            `${relativePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
-            `${relativePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
-            `${relativePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
+            `${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
+            `${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
+            `${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
           ],
         })
       )
-      if (!useAlias && vhdMode === 'directory') {
-        try {
-          await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
-        } catch (err) {
-          assert.strictEqual(
-            err.code,
-            'NOT_SUPPORTED',
-            'Merging directory without alias should raise a not supported error'
-          )
-          return
-        }
-        assert.strictEqual(true, false, 'Merging directory without alias should raise an error')
-      }
-      await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
-
-      const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
+      await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
+
+      const metadata = JSON.parse(await handler.readFile(`metadata.json`))
       // size should be the size of children + grand children + clean after the merge
-      assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
+      expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)

       // broken vhd, non referenced, abandonned should be deleted (alias and data)
       // ancestor and child should be merged
@@ -405,19 +392,19 @@ describe('tests multiple combination ', () => {
       if (useAlias) {
         const dataSurvivors = await handler.list(basePath + '/data')
         // the goal of the alias: do not move a full folder
-        assert.equal(dataSurvivors.includes('ancestor.vhd'), true)
-        assert.equal(dataSurvivors.includes('grandchild.vhd'), true)
-        assert.equal(dataSurvivors.includes('cleanAncestor.vhd'), true)
-        assert.equal(survivors.includes('clean.vhd.alias.vhd'), true)
-        assert.equal(survivors.includes('child.vhd.alias.vhd'), true)
-        assert.equal(survivors.includes('grandchild.vhd.alias.vhd'), true)
-        assert.equal(survivors.length, 4) // the 3 ok + data
-        assert.equal(dataSurvivors.length, 3)
+        expect(dataSurvivors).toContain('ancestor.vhd')
+        expect(dataSurvivors).toContain('grandchild.vhd')
+        expect(dataSurvivors).toContain('cleanAncestor.vhd')
+        expect(survivors).toContain('clean.vhd.alias.vhd')
+        expect(survivors).toContain('child.vhd.alias.vhd')
+        expect(survivors).toContain('grandchild.vhd.alias.vhd')
+        expect(survivors.length).toEqual(4) // the 3 ok + data
+        expect(dataSurvivors.length).toEqual(3) // the 3 ok
       } else {
-        assert.equal(survivors.includes('clean.vhd'), true)
-        assert.equal(survivors.includes('child.vhd'), true)
-        assert.equal(survivors.includes('grandchild.vhd'), true)
-        assert.equal(survivors.length, 3)
+        expect(survivors).toContain('clean.vhd')
+        expect(survivors).toContain('child.vhd')
+        expect(survivors).toContain('grandchild.vhd')
+        expect(survivors.length).toEqual(3)
       }
     })
   }
@@ -427,9 +414,9 @@ describe('tests multiple combination ', () => {
 test('it cleans orphan merge states ', async () => {
   await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')

-  await adapter.cleanVm(rootPath, { remove: true, logWarn: () => {}, lock: false })
+  await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })

-  assert.deepEqual(await handler.list(basePath), [])
+  expect(await handler.list(basePath)).toEqual([])
 })

 test('check Aliases should work alone', async () => {
@@ -450,8 +437,8 @@ test('check Aliases should work alone', async () => {
   // only ok have survived
   const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
-  assert.equal(alias.length, 1)
+  expect(alias.length).toEqual(1)

   const data = await handler.list('vhds/data')
-  assert.equal(data.length, 1)
+  expect(data.length).toEqual(1)
 })
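These last two assertions encode the on-disk layout `checkAliases` must leave behind: exactly one valid alias at the top level and exactly one data file under `data/`. Schematically, with illustrative fixture names:

```
vhds/
├── ok.alias.vhd   ← kept: a valid alias…
└── data/
    └── ok.vhd     ← …and the data it points at; everything else was removed
```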
Some files were not shown because too many files have changed in this diff.