Compare commits


1 Commit

Author: Florent Beauchamp
SHA1: f618fcdaf8
Message: feat(vhd): implement encryption on vhd directory
Date: 2022-06-18 11:12:30 +02:00
782 changed files with 12869 additions and 43431 deletions

View File

@@ -1 +0,0 @@
{ "extends": ["@commitlint/config-conventional"] }

View File

@@ -28,10 +28,8 @@ module.exports = {
},
},
{
files: ['*.{spec,test}.{,c,m}js'],
files: ['*.spec.{,c,m}js'],
rules: {
'n/no-unpublished-require': 'off',
'n/no-unpublished-import': 'off',
'n/no-unsupported-features/node-builtins': [
'error',
{

View File

@@ -6,10 +6,7 @@ labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
1. ⚠️ **If you don't follow this template, the issue will be closed**.
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
Are you using XOA or XO from the sources?
**XOA or XO from the sources?**
If XOA:
@@ -18,7 +15,6 @@ If XOA:
If XO from the sources:
- Provide **your commit number**. If it's older than a week, we won't investigate
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
@@ -42,6 +38,8 @@ If applicable, add screenshots to help explain your problem.
**Environment (please provide the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**

View File

@@ -4,6 +4,7 @@ about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**

View File

@@ -1,32 +0,0 @@
name: Continuous Integration
on: push
jobs:
CI:
runs-on: ubuntu-latest
steps:
# https://github.com/actions/checkout
- uses: actions/checkout@v3
- name: Install packages
run: |
sudo apt-get update
sudo apt-get install -y curl qemu-utils python3-vmdkstream git libxml2-utils libfuse2 nbdkit
- name: Cache Turbo
# https://github.com/actions/cache
uses: actions/cache@v3
with:
path: '**/node_modules/.cache/turbo'
key: ${{ runner.os }}-turbo-cache
- name: Setup Node environment
# https://github.com/actions/setup-node
uses: actions/setup-node@v3
with:
node-version: '18'
cache: 'yarn'
- name: Install project dependencies
run: yarn
- name: Build the project
run: yarn build
- name: Lint tests
run: yarn test-lint
- name: Integration tests
run: sudo yarn test-integration

.github/workflows/push.yml (vendored, normal file, 13 additions)
View File

@@ -0,0 +1,13 @@
name: CI
on: [push]
jobs:
build:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- run: docker-compose -f docker/docker-compose.dev.yml build
- run: docker-compose -f docker/docker-compose.dev.yml up

.gitignore (vendored, 3 changes)
View File

@@ -10,6 +10,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat
@@ -34,4 +36,3 @@ yarn-error.log.*
# code coverage
.nyc_output/
coverage/
.turbo/

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
# Only check commit message if commit on master or first commit on another
# branch to avoid bothering fix commits after reviews
#
# FIXME: does not properly run with git commit --amend
if [ "$(git rev-parse --abbrev-ref HEAD)" = master ] || [ "$(git rev-list --count master..)" -eq 0 ]
then
npx --no -- commitlint --edit "$1"
fi

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx lint-staged

View File

@@ -14,7 +14,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
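To make these options concrete, here is a small usage sketch (not part of the diff; `fetchRecord` is a hypothetical helper):

```js
const { asyncEach } = require('@vates/async-each')

const ids = ['a', 'b', 'c', 'd']

// at most 2 calls to the iteratee run at any time; with `stopOnError: false`,
// every call is awaited and failures are grouped into an AggregateError
await asyncEach(
  ids,
  async (id, index, iterable) => {
    await fetchRecord(id) // hypothetical async work on each item
  },
  { concurrency: 2, stopOnError: false }
)
```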

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/async-each):
```sh
npm install --save @vates/async-each
```
> npm install --save @vates/async-each
```
## Usage
@@ -32,7 +32,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

View File

@@ -9,16 +9,7 @@ class AggregateError extends Error {
}
}
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []

View File

@@ -1,8 +1,6 @@
'use strict'
const { describe, it, beforeEach } = require('test')
const assert = require('assert').strict
const { spy } = require('sinon')
/* eslint-env jest */
const { asyncEach } = require('./')
@@ -36,18 +34,12 @@ describe('asyncEach', () => {
})
it('works', async () => {
const iteratee = spy(async () => {})
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
await asyncEach.call(thisArg, iterable, iteratee)
assert.deepStrictEqual(
iteratee.thisValues,
Array.from(values, () => thisArg)
)
assert.deepStrictEqual(
iteratee.args,
Array.from(values, (value, index) => [value, index, iterable])
)
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
;[1, 2, 4].forEach(concurrency => {
it('respects a concurrency of ' + concurrency, async () => {
@@ -57,7 +49,7 @@ describe('asyncEach', () => {
values,
async () => {
++running
assert.deepStrictEqual(running <= concurrency, true)
expect(running).toBeLessThanOrEqual(concurrency)
await randomDelay()
--running
},
@@ -67,52 +59,40 @@ describe('asyncEach', () => {
})
it('stops on first error when stopOnError is true', async () => {
const tracker = new assert.CallTracker()
const error = new Error()
const iteratee = tracker.calls((_, i) => {
const iteratee = jest.fn((_, i) => {
if (i === 1) {
throw error
}
}, 2)
assert.deepStrictEqual(
await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true })),
error
)
})
tracker.verify()
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
it('rejects AggregateError when stopOnError is false', async () => {
const errors = []
const iteratee = spy(() => {
const iteratee = jest.fn(() => {
const error = new Error()
errors.push(error)
throw error
})
const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
assert.deepStrictEqual(error.errors, errors)
assert.deepStrictEqual(
iteratee.args,
Array.from(values, (value, index) => [value, index, iterable])
)
expect(error.errors).toEqual(errors)
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
it('can be interrupted with an AbortSignal', async () => {
const tracker = new assert.CallTracker()
const ac = new AbortController()
const iteratee = tracker.calls((_, i) => {
const iteratee = jest.fn((_, i) => {
if (i === 1) {
ac.abort()
}
}, 2)
await assert.rejects(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal }), {
message: 'asyncEach aborted',
})
tracker.verify()
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
expect(iteratee).toHaveBeenCalledTimes(2)
})
})
)

View File

@@ -24,17 +24,11 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"sinon": "^15.0.1",
"tap": "^16.3.0",
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/cached-dns.lookup):
```sh
npm install --save @vates/cached-dns.lookup
```
> npm install --save @vates/cached-dns.lookup
```
## Usage

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-calls):
```sh
npm install --save @vates/coalesce-calls
```
> npm install --save @vates/coalesce-calls
```
## Usage

View File

@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('assert')
/* eslint-env jest */
const { coalesceCalls } = require('./')
@@ -24,13 +23,13 @@ describe('coalesceCalls', () => {
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
assert.strictEqual(await promise1, 'foo')
assert.strictEqual(await promise2, 'foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
assert.strictEqual(await promise3, 'bar')
expect(await promise3).toBe('bar')
})
})

View File

@@ -30,10 +30,6 @@
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/compose):
```sh
npm install --save @vates/compose
```
> npm install --save @vates/compose
```
## Usage

View File

@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert').strict
/* eslint-env jest */
const { compose } = require('./')
@@ -10,42 +9,43 @@ const mul3 = x => x * 3
describe('compose()', () => {
it('throws when no functions is passed', () => {
assert.throws(() => compose(), TypeError)
assert.throws(() => compose([]), TypeError)
expect(() => compose()).toThrow(TypeError)
expect(() => compose([])).toThrow(TypeError)
})
it('applies from left to right', () => {
assert.strictEqual(compose(add2, mul3)(5), 21)
expect(compose(add2, mul3)(5)).toBe(21)
})
it('accepts functions in an array', () => {
assert.strictEqual(compose([add2, mul3])(5), 21)
expect(compose([add2, mul3])(5)).toBe(21)
})
it('can apply from right to left', () => {
assert.strictEqual(compose({ right: true }, add2, mul3)(5), 17)
expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
})
it('accepts options with functions in an array', () => {
assert.strictEqual(compose({ right: true }, [add2, mul3])(5), 17)
expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
})
it('can compose async functions', async () => {
assert.strictEqual(
expect(
await compose(
{ async: true },
async x => x + 2,
async x => x * 3
)(5),
21
)
)(5)
).toBe(21)
})
it('forwards all args to first function', () => {
expect.assertions(1)
const expectedArgs = [Math.random(), Math.random()]
compose(
(...args) => {
assert.deepEqual(args, expectedArgs)
expect(args).toEqual(expectedArgs)
},
// add a second function to avoid the one function special case
Function.prototype
@@ -53,13 +53,15 @@ describe('compose()', () => {
})
it('forwards context to all functions', () => {
expect.assertions(2)
const expectedThis = {}
compose(
function () {
assert.strictEqual(this, expectedThis)
expect(this).toBe(expectedThis)
},
function () {
assert.strictEqual(this, expectedThis)
expect(this).toBe(expectedThis)
}
).call(expectedThis)
})

View File

@@ -19,10 +19,6 @@
"node": ">=7.6"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/decorate-with):
```sh
npm install --save @vates/decorate-with
```
> npm install --save @vates/decorate-with
```
## Usage

View File

@@ -1,7 +1,7 @@
'use strict'
const assert = require('assert')
const { describe, it } = require('test')
const { describe, it } = require('tap').mocha
const { decorateClass, decorateWith, decorateMethodsWith, perInstance } = require('./')

View File

@@ -26,9 +26,9 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"test": "tap"
},
"devDependencies": {
"test": "^3.2.1"
"tap": "^16.0.1"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/disposable):
```sh
npm install --save @vates/disposable
```
> npm install --save @vates/disposable
```
## Usage

View File

@@ -1,17 +1,16 @@
'use strict'
const { describe, it } = require('test')
const { useFakeTimers, spy, assert } = require('sinon')
/* eslint-env jest */
const { createDebounceResource } = require('./debounceResource')
const clock = useFakeTimers()
jest.useFakeTimers()
describe('debounceResource()', () => {
it('calls the resource disposer after 10 seconds', async () => {
const debounceResource = createDebounceResource()
const delay = 10e3
const dispose = spy()
const dispose = jest.fn()
const resource = await debounceResource(
Promise.resolve({
@@ -23,10 +22,10 @@ describe('debounceResource()', () => {
resource.dispose()
assert.notCalled(dispose)
expect(dispose).not.toBeCalled()
clock.tick(delay)
jest.advanceTimersByTime(delay)
assert.called(dispose)
expect(dispose).toBeCalled()
})
})

View File

@@ -1,14 +1,13 @@
'use strict'
const { describe, it } = require('test')
const { spy, assert } = require('sinon')
/* eslint-env jest */
const { deduped } = require('./deduped')
describe('deduped()', () => {
it('calls the resource function only once', async () => {
const value = {}
const getResource = spy(async () => ({
const getResource = jest.fn(async () => ({
value,
dispose: Function.prototype,
}))
@@ -18,13 +17,13 @@ describe('deduped()', () => {
const { value: v1 } = await dedupedGetResource()
const { value: v2 } = await dedupedGetResource()
assert.calledOnce(getResource)
assert.match(v1, value)
assert.match(v2, value)
expect(getResource).toHaveBeenCalledTimes(1)
expect(v1).toBe(value)
expect(v2).toBe(value)
})
it('only disposes the source disposable when its all copies dispose', async () => {
const dispose = spy()
const dispose = jest.fn()
const getResource = async () => ({
value: '',
dispose,
@@ -37,35 +36,35 @@ describe('deduped()', () => {
d1()
assert.notCalled(dispose)
expect(dispose).not.toHaveBeenCalled()
d2()
assert.calledOnce(dispose)
expect(dispose).toHaveBeenCalledTimes(1)
})
it('works with sync factory', () => {
const value = {}
const dispose = spy()
const dispose = jest.fn()
const dedupedGetResource = deduped(() => ({ value, dispose }))
const d1 = dedupedGetResource()
assert.match(d1.value, value)
expect(d1.value).toBe(value)
const d2 = dedupedGetResource()
assert.match(d2.value, value)
expect(d2.value).toBe(value)
d1.dispose()
assert.notCalled(dispose)
expect(dispose).not.toHaveBeenCalled()
d2.dispose()
assert.calledOnce(dispose)
expect(dispose).toHaveBeenCalledTimes(1)
})
it('no race condition on dispose before async acquisition', async () => {
const dispose = spy()
const dispose = jest.fn()
const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))
const d1 = await dedupedGetResource()
@@ -74,6 +73,6 @@ describe('deduped()', () => {
d1.dispose()
assert.notCalled(dispose)
expect(dispose).not.toHaveBeenCalled()
})
})

View File

@@ -14,22 +14,17 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.4",
"version": "0.1.1",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/log": "^0.3.0",
"ensure-array": "^1.0.0"
},
"devDependencies": {
"sinon": "^15.0.1",
"test": "^3.2.1"
}
}

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/event-listeners-manager):
```sh
npm install --save @vates/event-listeners-manager
```
> npm install --save @vates/event-listeners-manager
```
## Usage

View File

@@ -35,7 +35,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.1",
"version": "1.0.0",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --branches=72"

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,66 +0,0 @@
'use strict'
const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
// build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
mtime: st.mtime || new Date(),
atime: st.atime || new Date(),
ctime: st.ctime || new Date(),
size: st.size !== undefined ? st.size : 0,
mode: st.mode === 'dir' ? 16877 : st.mode === 'file' ? 33188 : st.mode === 'link' ? 41453 : st.mode,
uid: st.uid !== undefined ? st.uid : process.getuid(),
gid: st.gid !== undefined ? st.gid : process.getgid(),
})
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({
max: 16, // each cached block is 2MB in size
})
await vhd.readBlockAllocationTable()
const fuse = new Fuse(mountDir, {
async readdir(path, cb) {
if (path === '/') {
return cb(null, ['vhd0'])
}
cb(Fuse.ENOENT)
},
async getattr(path, cb) {
if (path === '/') {
return cb(
null,
stat({
mode: 'dir',
size: 4096,
})
)
}
if (path === '/vhd0') {
return cb(
null,
stat({
mode: 'file',
size: vhd.footer.currentSize,
})
)
}
cb(Fuse.ENOENT)
},
read(path, fd, buf, len, pos, cb) {
if (path === '/vhd0') {
return vhd.readRawData(pos, len, cache, buf).then(cb)
}
throw new Error(`read file ${path} not exists`)
},
})
return new Disposable(
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})

View File

@@ -1,29 +0,0 @@
{
"name": "@vates/fuse-vhd",
"version": "1.0.0",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/fuse-vhd",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
},
"dependencies": {
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.2.1"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):
```sh
npm install --save @vates/multi-key-map
```
> npm install --save @vates/multi-key-map
```
## Usage

View File

@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert')
/* eslint-env jest */
const { MultiKeyMap } = require('./')
@@ -29,9 +28,9 @@ describe('MultiKeyMap', () => {
keys.forEach((key, i) => {
// copy the key to make sure the array itself is not the key
assert.strictEqual(map.get(key.slice()), values[i])
expect(map.get(key.slice())).toBe(values[i])
map.delete(key.slice())
assert.strictEqual(map.get(key.slice()), undefined)
expect(map.get(key.slice())).toBe(undefined)
})
})
})

View File

@@ -23,10 +23,6 @@
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}

View File

@@ -1,16 +0,0 @@
### `new NbdClient({address, exportname, secure = true, port = 10809})`
create a new nbd client
```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
address: 'MY_NBD_HOST',
exportname: 'MY_SECRET_EXPORT',
cert: 'Server certificate', // optional, will use encrypted link if provided
})
await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,47 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/nbd-client
[![Package Version](https://badgen.net/npm/v/@vates/nbd-client)](https://npmjs.org/package/@vates/nbd-client) ![License](https://badgen.net/npm/license/@vates/nbd-client) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/nbd-client)](https://bundlephobia.com/result?p=@vates/nbd-client) [![Node compatibility](https://badgen.net/npm/node/@vates/nbd-client)](https://npmjs.org/package/@vates/nbd-client)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/nbd-client):
```sh
npm install --save @vates/nbd-client
```
## Usage
### `new NbdClient({address, exportname, secure = true, port = 10809})`
create a new nbd client
```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
address: 'MY_NBD_HOST',
exportname: 'MY_SECRET_EXPORT',
cert: 'Server certificate', // optional, will use encrypted link if provided
})
await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,42 +0,0 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensures we're connected to an NBD server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" starts an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7
exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5
exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8
exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1
exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024

View File

@@ -1,249 +0,0 @@
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
INIT_PASSWD,
NBD_CMD_READ,
NBD_DEFAULT_BLOCK_SIZE,
NBD_DEFAULT_PORT,
NBD_FLAG_FIXED_NEWSTYLE,
NBD_FLAG_HAS_FLAGS,
NBD_OPT_EXPORT_NAME,
NBD_OPT_REPLY_MAGIC,
NBD_OPT_STARTTLS,
NBD_REPLY_ACK,
NBD_REPLY_MAGIC,
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
NBD_CMD_DISC,
} = require('./constants.js')
const { fromCallback } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
module.exports = class NbdClient {
#serverAddress
#serverCert
#serverPort
#serverSocket
#exportName
#exportSize
// AFAIK, there is no guarantee the server answers in the same order as the queries
// so we handle a backlog of commands waiting for responses and handle concurrency manually
#waitingForResponse // there is already a listener waiting for a response
#nextCommandQueryId = BigInt(0)
#commandQueryBacklog // map of commands waiting for a response: queryId => { size /* in bytes */, resolve, reject }
constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
this.#serverAddress = address
this.#serverPort = port
this.#exportName = exportname
this.#serverCert = cert
}
get exportSize() {
return this.#exportSize
}
async #tlsConnect() {
return new Promise((resolve, reject) => {
this.#serverSocket = connect({
socket: this.#serverSocket,
rejectUnauthorized: false,
cert: this.#serverCert,
})
this.#serverSocket.once('error', reject)
this.#serverSocket.once('secureConnect', () => {
this.#serverSocket.removeListener('error', reject)
resolve()
})
})
}
// mandatory, at least to start the handshake
async #unsecureConnect() {
this.#serverSocket = new Socket()
return new Promise((resolve, reject) => {
this.#serverSocket.connect(this.#serverPort, this.#serverAddress)
this.#serverSocket.once('error', reject)
this.#serverSocket.once('connect', () => {
this.#serverSocket.removeListener('error', reject)
resolve()
})
})
}
async connect() {
// first we connect to the server without TLS, and then we upgrade the connection
// to tls during the handshake
await this.#unsecureConnect()
await this.#handshake()
// reset internal state if we reconnected a nbd client
this.#commandQueryBacklog = new Map()
this.#waitingForResponse = false
}
async disconnect() {
const buffer = Buffer.alloc(28)
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
buffer.writeInt16BE(0, 4) // no command flags for a disconnect
buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
await this.#write(buffer)
await this.#serverSocket.destroy()
}
// we can use individual read/write from the socket here since there is no concurrency
async #sendOption(option, buffer = Buffer.alloc(0)) {
await this.#write(OPTS_MAGIC)
await this.#writeInt32(option)
await this.#writeInt32(buffer.length)
await this.#write(buffer)
assert.strictEqual(await this.#readInt64(), NBD_OPT_REPLY_MAGIC) // magic number everywhere
assert.strictEqual(await this.#readInt32(), option) // the option passed
assert.strictEqual(await this.#readInt32(), NBD_REPLY_ACK) // ACK
const length = await this.#readInt32()
assert.strictEqual(length, 0) // length
}
// we can use individual read/write from the socket here since there is only one handshake at once, no concurrency
async #handshake() {
assert((await this.#read(8)).equals(INIT_PASSWD))
assert((await this.#read(8)).equals(OPTS_MAGIC))
const flagsBuffer = await this.#read(2)
const flags = flagsBuffer.readInt16BE(0)
assert.strictEqual(flags & NBD_FLAG_FIXED_NEWSTYLE, NBD_FLAG_FIXED_NEWSTYLE) // only the FIXED_NEWSTYLE option is supported by the server
await this.#writeInt32(NBD_FLAG_FIXED_NEWSTYLE) // the client also supports NBD_FLAG_C_FIXED_NEWSTYLE
if (this.#serverCert !== undefined) {
// upgrade socket to TLS if needed
await this.#sendOption(NBD_OPT_STARTTLS)
await this.#tlsConnect()
}
// send the export name we want to access.
// this implicitly closes the negotiation phase.
await this.#write(OPTS_MAGIC)
await this.#writeInt32(NBD_OPT_EXPORT_NAME)
const exportNameBuffer = Buffer.from(this.#exportName)
await this.#writeInt32(exportNameBuffer.length)
await this.#write(exportNameBuffer)
// 8 (export size) + 2 (flags) + 124 zeroes = 134
// must read all to ensure nothing stays in the buffer
const answer = await this.#read(134)
this.#exportSize = answer.readBigUInt64BE(0)
const transmissionFlags = answer.readInt16BE(8)
assert.strictEqual(transmissionFlags & NBD_FLAG_HAS_FLAGS, NBD_FLAG_HAS_FLAGS, 'NBD_FLAG_HAS_FLAGS') // must always be 1 by the norm
// note: the xapi server always sends NBD_FLAG_READ_ONLY (3) as a flag
}
#read(length) {
return readChunkStrict(this.#serverSocket, length)
}
#write(buffer) {
return fromCallback.call(this.#serverSocket, 'write', buffer)
}
async #readInt32() {
const buffer = await this.#read(4)
return buffer.readInt32BE(0)
}
async #readInt64() {
const buffer = await this.#read(8)
return buffer.readBigUInt64BE(0)
}
#writeInt32(int) {
const buffer = Buffer.alloc(4)
buffer.writeInt32BE(int)
return this.#write(buffer)
}
// when one read fails, stop everything
async #rejectAll(error) {
this.#commandQueryBacklog.forEach(({ reject }) => {
reject(error)
})
await this.disconnect()
}
async #readBlockResponse() {
// ensure at most one read occurs in parallel
if (this.#waitingForResponse) {
return
}
try {
this.#waitingForResponse = true
const magic = await this.#readInt32()
if (magic !== NBD_REPLY_MAGIC) {
throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
}
const error = await this.#readInt32()
if (error !== 0) {
// @todo use error code from constants.mjs
throw new Error(`GOT ERROR CODE : ${error}`)
}
const blockQueryId = await this.#readInt64()
const query = this.#commandQueryBacklog.get(blockQueryId)
if (!query) {
throw new Error(` no query associated with id ${blockQueryId}`)
}
this.#commandQueryBacklog.delete(blockQueryId)
const data = await this.#read(query.size)
query.resolve(data)
this.#waitingForResponse = false
if (this.#commandQueryBacklog.size > 0) {
await this.#readBlockResponse()
}
} catch (error) {
// reject all the promises
// we don't need to call readBlockResponse on failure
// since we will empty the backlog
await this.#rejectAll(error)
}
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
const queryId = this.#nextCommandQueryId
this.#nextCommandQueryId++
// create and send command at once to ensure there is no concurrency issue
const buffer = Buffer.alloc(28)
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
buffer.writeInt16BE(0, 4) // no command flags for a simple block read
buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
buffer.writeBigUInt64BE(queryId, 8)
// byte offset in the raw disk
buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
buffer.writeInt32BE(size, 24)
return new Promise((resolve, reject) => {
// this will handle one block response, but it can be another block
// since the server does not guarantee to handle queries in order
this.#commandQueryBacklog.set(queryId, {
size,
resolve,
reject,
})
// really send the command to the server
this.#write(buffer).catch(reject)
// #readBlockResponse never throws directly
// but if it fails it will reject all the promises in the backlog
this.#readBlockResponse()
})
}
}

View File

@@ -1,76 +0,0 @@
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const FILE_SIZE = 2 * 1024 * 1024
async function createTempFile(size) {
const tmpPath = await pFromCallback(cb => tmp.file(cb))
const data = Buffer.alloc(size, 0)
for (let i = 0; i < size; i += 4) {
data.writeUInt32BE(i, i)
}
await fs.writeFile(tmpPath, data)
return tmpPath
}
test('it works with unsecured network', async tap => {
const path = await createTempFile(FILE_SIZE)
const nbdServer = spawn(
'nbdkit',
[
'file',
path,
'--newstyle', //
'--exit-with-parent',
'--read-only',
'--export-name=MY_SECRET_EXPORT',
],
{
stdio: ['inherit', 'inherit', 'inherit'],
}
)
const client = new NbdClient({
address: 'localhost',
exportname: 'MY_SECRET_EXPORT',
secure: false,
})
await client.connect()
tap.equal(client.exportSize, BigInt(FILE_SIZE))
const CHUNK_SIZE = 128 * 1024 // non default size
const indexes = []
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
indexes.push(i)
}
// read multiple blocks in parallel
await asyncEach(
indexes,
async i => {
const block = await client.readBlock(i, CHUNK_SIZE)
let blockOk = true
let firstFail
for (let j = 0; j < CHUNK_SIZE; j += 4) {
const wanted = i * CHUNK_SIZE + j
const found = block.readUInt32BE(j)
blockOk = blockOk && found === wanted
if (!blockOk && firstFail === undefined) {
firstFail = j
}
}
tap.ok(blockOk, `check block ${i} content`)
},
{ concurrency: 8 }
)
await client.disconnect()
nbdServer.kill()
await fs.unlink(path)
})

View File

@@ -1,35 +0,0 @@
{
"private": false,
"name": "@vates/nbd-client",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/nbd-client",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/nbd-client",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.1",
"engines": {
"node": ">=14.0"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"promise-toolbox": "^0.21.0",
"xen-api": "^1.2.7"
},
"devDependencies": {
"tap": "^16.3.0",
"tmp": "^0.2.1"
},
"scripts": {
"postversion": "npm publish --access public",
"test-integration": "tap *.spec.js"
}
}

View File

@@ -1,130 +0,0 @@
### Usual workflow
> This section presents how this library should be used to implement classic two-factor authentication.
#### Setup
```js
import { generateSecret, generateTotp } from '@vates/otp'
import QrCode from 'qrcode'
// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()
// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)
// Generates a URI to present to the user
const uri = generateTotpUri({ secret })
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```
#### Authentication
```js
import { verifyTotp } from '@vates/otp'
// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
console.log('authenticated!')
}
```
### API
#### Secret
```js
import { generateSecret } from '@vates/otp'
const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
#### HOTP
> This is likely not what you want to use, see TOTP below instead.
```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
// a sequence number, see HOTP specification
const counter = 0
// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'
// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid overriding it, as other values are not compatible with Google Authenticator
#### TOTP
```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'
// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'
// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid overriding it, as other values are not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, at which this token will be valid, defaults to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid
#### Verification from URI
```js
import { verifyFromUri } from '@vates/otp'
// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,163 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/otp
[![Package Version](https://badgen.net/npm/v/@vates/otp)](https://npmjs.org/package/@vates/otp) ![License](https://badgen.net/npm/license/@vates/otp) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/otp)](https://bundlephobia.com/result?p=@vates/otp) [![Node compatibility](https://badgen.net/npm/node/@vates/otp)](https://npmjs.org/package/@vates/otp)
> Minimal HOTP/TOTP implementation
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/otp):
```sh
npm install --save @vates/otp
```
## Usage
### Usual workflow
> This section presents how this library should be used to implement classic two-factor authentication.
#### Setup
```js
import { generateSecret, generateTotp } from '@vates/otp'
import QrCode from 'qrcode'
// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()
// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)
// Generates a URI to present to the user
const uri = generateTotpUri({ secret })
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```
#### Authentication
```js
import { verifyTotp } from '@vates/otp'
// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
console.log('authenticated!')
}
```
### API
#### Secret
```js
import { generateSecret } from '@vates/otp'
const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
#### HOTP
> This is likely not what you want to use, see TOTP below instead.
```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
// a sequence number, see HOTP specification
const counter = 0
// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'
// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid overriding it, as other values are not compatible with Google Authenticator
#### TOTP
```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'
// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'
// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid overriding it, as other values are not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, at which this token will be valid, defaults to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid
#### Verification from URI
```js
import { verifyFromUri } from '@vates/otp'
// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,111 +0,0 @@
import { base32 } from 'rfc4648'
import { webcrypto } from 'node:crypto'
const { subtle } = webcrypto
function assert(name, value) {
if (!value) {
throw new TypeError('invalid value for param ' + name)
}
}
// https://github.com/google/google-authenticator/wiki/Key-Uri-Format
function generateUri(protocol, label, params) {
assert('label', typeof label === 'string')
assert('secret', typeof params.secret === 'string')
let path = encodeURIComponent(label)
const { issuer } = params
if (issuer !== undefined) {
path = encodeURIComponent(issuer) + ':' + path
}
const query = Object.entries(params)
.filter(_ => _[1] !== undefined)
.map(([key, value]) => key + '=' + encodeURIComponent(value))
.join('&')
return `otpauth://${protocol}/${path}?${query}`
}
export function generateSecret() {
// https://www.rfc-editor.org/rfc/rfc4226 recommends 160 bits (i.e. 20 bytes)
const data = new Uint8Array(20)
webcrypto.getRandomValues(data)
return base32.stringify(data, { pad: false })
}
const DIGITS = 6
// https://www.rfc-editor.org/rfc/rfc4226
export async function generateHotp({ counter, digits = DIGITS, secret }) {
const data = new Uint8Array(8)
new DataView(data.buffer).setBigInt64(0, BigInt(counter), false)
const key = await subtle.importKey(
'raw',
base32.parse(secret, { loose: true }),
{ name: 'HMAC', hash: 'SHA-1' },
false,
['sign', 'verify']
)
const digest = new DataView(await subtle.sign('HMAC', key, data))
const offset = digest.getUint8(digest.byteLength - 1) & 0xf
const p = digest.getUint32(offset) & 0x7f_ff_ff_ff
return String(p % Math.pow(10, digits)).padStart(digits, '0')
}
export function generateHotpUri({ counter, digits, issuer, label, secret }) {
assert('counter', typeof counter === 'number')
return generateUri('hotp', label, { counter, digits, issuer, secret })
}
export async function verifyHotp(token, opts) {
return token === (await generateHotp(opts))
}
function totpCounter(period = 30, timestamp = Math.floor(Date.now() / 1e3)) {
return Math.floor(timestamp / period)
}
// https://www.rfc-editor.org/rfc/rfc6238.html
export async function generateTotp({ period, timestamp, ...opts }) {
opts.counter = totpCounter(period, timestamp)
return await generateHotp(opts)
}
export function generateTotpUri({ digits, issuer, label, period, secret }) {
return generateUri('totp', label, { digits, issuer, period, secret })
}
export async function verifyTotp(token, { period, timestamp, window = 1, ...opts }) {
const counter = totpCounter(period, timestamp)
const end = counter + window
opts.counter = counter - window
while (opts.counter <= end) {
if (token === (await generateHotp(opts))) {
return true
}
opts.counter += 1
}
return false
}
export async function verifyFromUri(token, uri) {
const url = new URL(uri)
assert('protocol', url.protocol === 'otpauth:')
const { host } = url
const opts = Object.fromEntries(url.searchParams.entries())
if (host === 'hotp') {
return await verifyHotp(token, opts)
}
if (host === 'totp') {
return await verifyTotp(token, opts)
}
assert('host', false)
}

View File

@@ -1,112 +0,0 @@
import { strict as assert } from 'node:assert'
import { describe, it } from 'tap/mocha'
import {
generateHotp,
generateHotpUri,
generateSecret,
generateTotp,
generateTotpUri,
verifyHotp,
verifyTotp,
} from './index.mjs'
describe('generateSecret', function () {
it('generates a string of 32 chars', async function () {
const secret = generateSecret()
assert.equal(typeof secret, 'string')
assert.equal(secret.length, 32)
})
it('generates a different secret at each call', async function () {
assert.notEqual(generateSecret(), generateSecret())
})
})
describe('HOTP', function () {
it('generate and verify valid tokens', async function () {
for (const [token, opts] of Object.entries({
382752: {
counter: -3088,
secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
},
163376: {
counter: 30598,
secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
},
})) {
assert.equal(await generateHotp(opts), token)
assert(await verifyHotp(token, opts))
}
})
describe('generateHotpUri', function () {
const opts = {
counter: 59732,
label: 'the label',
secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
}
Object.entries({
'without optional params': [
opts,
'otpauth://hotp/the%20label?counter=59732&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
'with issuer': [
{ ...opts, issuer: 'the issuer' },
'otpauth://hotp/the%20issuer:the%20label?counter=59732&issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
'with digits': [
{ ...opts, digits: 7 },
'otpauth://hotp/the%20label?counter=59732&digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
}).forEach(([title, [opts, uri]]) => {
it(title, async function () {
assert.strictEqual(generateHotpUri(opts), uri)
})
})
})
})
describe('TOTP', function () {
Object.entries({
'033702': {
secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
timestamp: 1665416296,
period: 30,
},
107250: {
secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
timestamp: 1665416674,
period: 60,
},
}).forEach(([token, opts]) => {
it('works', async function () {
assert.equal(await generateTotp(opts), token)
assert(await verifyTotp(token, opts))
})
})
describe('generateHotpUri', function () {
const opts = {
label: 'the label',
secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
}
Object.entries({
'without optional params': [opts, 'otpauth://totp/the%20label?secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX'],
'with issuer': [
{ ...opts, issuer: 'the issuer' },
'otpauth://totp/the%20issuer:the%20label?issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
'with digits': [
{ ...opts, digits: 7 },
'otpauth://totp/the%20label?digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
}).forEach(([title, [opts, uri]]) => {
it(title, async function () {
assert.strictEqual(generateTotpUri(opts), uri)
})
})
})
})

View File

@@ -1,39 +0,0 @@
{
"private": false,
"name": "@vates/otp",
"description": "Minimal HTOP/TOTP implementation",
"keywords": [
"2fa",
"authenticator",
"hotp",
"otp",
"totp"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/otp",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"main": "index.mjs",
"repository": {
"directory": "@vates/otp",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=15"
},
"dependencies": {
"rfc4648": "^1.5.2"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^16.3.0"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/parse-duration):
```sh
npm install --save @vates/parse-duration
```
> npm install --save @vates/parse-duration
```
## Usage

View File

@@ -1,7 +1,7 @@
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`; this permits the most efficient composition:
```js
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
@@ -36,21 +36,6 @@ isBetween3And10(10)
// → false
```
### `not(predicate)`
> Returns a predicate that returns the negation of the predicate.
```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)
isOdd(1)
// true
isOdd(2)
// false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.
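The following sketch (not part of the diff) illustrates the composition rule described at the top of this hunk; the predicate names are made up:

```js
const { every, some } = require('@vates/predicates')

const isNumber = value => typeof value === 'number'
const isOdd = value => value % 2 === 1

// undefined predicates are simply ignored
const isOddNumber = every(isNumber, undefined, isOdd)
isOddNumber(3) // → true
isOddNumber('3') // → false

// works the same for `some`, and a composition of only undefined
// predicates is itself undefined
some(undefined, isNumber)(42) // → true
every(undefined, undefined) // → undefined
```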

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/predicates):
```sh
npm install --save @vates/predicates
```
> npm install --save @vates/predicates
```
## Usage
@@ -19,7 +19,7 @@ npm install --save @vates/predicates
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`; this permits the most efficient composition:
```js
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
@@ -54,21 +54,6 @@ isBetween3And10(10)
// → false
```
### `not(predicate)`
> Returns a predicate that returns the negation of the predicate.
```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)
isOdd(1)
// true
isOdd(2)
// false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.

View File

@@ -51,22 +51,6 @@ exports.every = function every() {
}
}
const notPredicateTag = {}
exports.not = function not(predicate) {
if (isDefinedPredicate(predicate)) {
if (predicate.tag === notPredicateTag) {
return predicate.predicate
}
function notPredicate() {
return !predicate.apply(this, arguments)
}
notPredicate.predicate = predicate
notPredicate.tag = notPredicateTag
return notPredicate
}
}
exports.some = function some() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length

View File

@@ -3,14 +3,20 @@
const assert = require('assert/strict')
const { describe, it } = require('tap').mocha
const { every, not, some } = require('./')
const { every, some } = require('./')
const T = () => true
const F = () => false
const testArgHandling = fn => {
it('returns undefined if predicate is undefined', () => {
const testArgsHandling = fn => {
it('returns undefined if all predicates are undefined', () => {
assert.equal(fn(undefined), undefined)
assert.equal(fn([undefined]), undefined)
})
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('throws if it receives a non-predicate', () => {
@@ -18,15 +24,6 @@ const testArgHandling = fn => {
error.value = 3
assert.throws(() => fn(3), error)
})
}
const testArgsHandling = fn => {
testArgHandling(fn)
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('forwards this and arguments to predicates', () => {
const thisArg = 'qux'
@@ -39,21 +36,17 @@ const testArgsHandling = fn => {
})
}
const runTests = (fn, acceptMultiple, truthTable) =>
const runTests = (fn, truthTable) =>
it('works', () => {
truthTable.forEach(([result, ...predicates]) => {
if (acceptMultiple) {
assert.equal(fn(predicates)(), result)
} else {
assert.equal(predicates.length, 1)
}
assert.equal(fn(...predicates)(), result)
assert.equal(fn(predicates)(), result)
})
})
describe('every', () => {
testArgsHandling(every)
runTests(every, true, [
runTests(every, [
[true, T, T],
[false, T, F],
[false, F, T],
@@ -61,22 +54,9 @@ describe('every', () => {
])
})
describe('not', () => {
testArgHandling(not)
it('returns the original predicate if negated twice', () => {
assert.equal(not(not(T)), T)
})
runTests(not, false, [
[true, F],
[false, T],
])
})
describe('some', () => {
testArgsHandling(some)
runTests(some, true, [
runTests(some, [
[true, T, T],
[true, T, F],
[true, F, T],

View File

@@ -26,7 +26,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.1.0",
"version": "1.0.0",
"engines": {
"node": ">=6"
},

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
```sh
npm install --save @vates/read-chunk
```
> npm install --save @vates/read-chunk
```
## Usage
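A brief usage sketch of the two helpers exported by this package (the file name is made up, not from the diff):

```js
const { createReadStream } = require('node:fs')
const { readChunk, readChunkStrict } = require('@vates/read-chunk')

const stream = createReadStream('some-file.bin')

// lenient: resolves with up to 16 bytes, or null if the stream has ended
const maybeHeader = await readChunk(stream, 16)

// strict: resolves with exactly 16 bytes, rejects if the stream ends too early
const nextBytes = await readChunkStrict(stream, 16)
```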

View File

@@ -1,16 +1,7 @@
'use strict'
/**
* Read a chunk of data from a stream.
*
* @param {Readable} stream - A readable stream to read from.
* @param {number} size - The number of bytes to read.
* @returns {Promise<Buffer|null>} - A Promise that resolves to a Buffer of up to size bytes if available, or null if end of stream is reached. The Promise is rejected if there is an error while reading from the stream.
*/
const readChunk = (stream, size) =>
stream.closed || stream.readableEnded
? Promise.resolve(null)
: size === 0
size === 0
? Promise.resolve(Buffer.alloc(0))
: new Promise((resolve, reject) => {
function onEnd() {
@@ -40,13 +31,6 @@ const readChunk = (stream, size) =>
})
exports.readChunk = readChunk
/**
* Read a chunk of data from a stream.
*
* @param {Readable} stream - A readable stream to read from.
* @param {number} size - The number of bytes to read.
* @returns {Promise<Buffer>} - A Promise that resolves to a Buffer of size bytes. The Promise is rejected if there is an error while reading from the stream.
*/
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {

View File

@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert').strict
/* eslint-env jest */
const { Readable } = require('stream')
@@ -12,42 +11,35 @@ makeStream.obj = Readable.from
describe('readChunk', () => {
it('returns null if stream is empty', async () => {
assert.strictEqual(await readChunk(makeStream([])), null)
})
it('returns null if the stream is already ended', async () => {
const stream = await makeStream([])
await readChunk(stream)
assert.strictEqual(await readChunk(stream), null)
expect(await readChunk(makeStream([]))).toBe(null)
})
describe('with binary stream', () => {
it('returns the first chunk of data', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar'])), Buffer.from('foo'))
expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
})
it('returns a chunk of the specified size (smaller than first)', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 2), Buffer.from('fo'))
expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
})
it('returns a chunk of the specified size (larger than first)', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 4), Buffer.from('foob'))
expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
})
it('returns less data if stream ends', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
})
it('returns an empty buffer if the specified size is 0', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
})
})
describe('with object stream', () => {
it('returns the first chunk of data verbatim', async () => {
const chunks = [{}, {}]
assert.strictEqual(await readChunk(makeStream.obj(chunks)), chunks[0])
expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
})
})
})
@@ -63,15 +55,15 @@ const rejectionOf = promise =>
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended without data')
assert.strictEqual(error.chunk, undefined)
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended without data')
expect(error.chunk).toEqual(undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended with not enough data')
assert.deepEqual(error.chunk, Buffer.from('foobar'))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended with not enough data')
expect(error.chunk).toEqual(Buffer.from('foobar'))
})
})

View File

@@ -19,19 +19,15 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.0.1",
"version": "0.1.2",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"postversion": "npm publish --access public"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"devDependencies": {
"test": "^3.2.1"
}
}

View File

@@ -1,54 +0,0 @@
```js
import { Task } from '@vates/task'
const task = new Task({
name: 'my task',
// if defined, a new detached task is created
//
// if not defined and created inside an existing task, the new task is considered a subtask
onProgress(event) {
// this function is called each time this task or one of its subtasks changes state
const { id, timestamp, type } = event
if (type === 'start') {
const { name, parentId } = event
} else if (type === 'end') {
const { result, status } = event
} else if (type === 'info' || type === 'warning') {
const { data, message } = event
} else if (type === 'property') {
const { name, value } = event
}
},
})
// this field is settable once before being observed
task.id
task.status
await task.abort()
// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
// the abort signal of the current task if any, otherwise is `undefined`
Task.abortSignal
// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)
// sends an info on the current task if any, otherwise does nothing
Task.warning(message, data)
// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```
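A minimal sketch of the API documented above, collecting every event a task emits (names are illustrative):

```js
import { Task } from '@vates/task'

const events = []
const task = new Task({ name: 'sync remote', onProgress: event => events.push(event) })

await task.run(async () => {
  Task.set('progress', 0)
  Task.info('starting transfer')
  // ... actual work goes here ...
  Task.set('progress', 100)
})

// expected order of types: start, property, info, property, end
console.log(events.map(event => event.type))
```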

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,85 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/task
[![Package Version](https://badgen.net/npm/v/@vates/task)](https://npmjs.org/package/@vates/task) ![License](https://badgen.net/npm/license/@vates/task) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/task)](https://bundlephobia.com/result?p=@vates/task) [![Node compatibility](https://badgen.net/npm/node/@vates/task)](https://npmjs.org/package/@vates/task)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/task):
```sh
npm install --save @vates/task
```
## Usage
```js
import { Task } from '@vates/task'
const task = new Task({
name: 'my task',
// if defined, a new detached task is created
//
// if not defined and created inside an existing task, the new task is considered a subtask
onProgress(event) {
// this function is called each time this task or one of its subtasks changes state
const { id, timestamp, type } = event
if (type === 'start') {
const { name, parentId } = event
} else if (type === 'end') {
const { result, status } = event
} else if (type === 'info' || type === 'warning') {
const { data, message } = event
} else if (type === 'property') {
const { name, value } = event
}
},
})
// this field is settable once before being observed
task.id
task.status
await task.abort()
// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
// the abort signal of the current task if any, otherwise is `undefined`
Task.abortSignal
// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)
// sends an info on the current task if any, otherwise does nothing
Task.warning(message, data)
// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```
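A hedged sketch of the nesting behaviour described above: a Task created while another task is running, without its own `onProgress`, becomes a subtask and reports through the parent's handler.

```js
import { Task } from '@vates/task'

const events = []
await new Task({ name: 'parent', onProgress: event => events.push(event) }).run(async () => {
  // created inside a running task and without onProgress: this is a subtask
  await new Task({ name: 'child' }).run(() => Task.info('working in the subtask'))
})

// the parent's onProgress also receives the child's events,
// and the child's `start` event carries a `parentId` pointing at the parent
```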
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,184 +0,0 @@
'use strict'
const assert = require('node:assert').strict
const { AsyncLocalStorage } = require('node:async_hooks')
// define a read-only, non-enumerable, non-configurable property
function define(object, property, value) {
Object.defineProperty(object, property, { value })
}
const noop = Function.prototype
const ABORTED = 'aborted'
const ABORTING = 'aborting'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { ABORTED, ABORTING, FAILURE, PENDING, SUCCESS }
const asyncStorage = new AsyncLocalStorage()
const getTask = () => asyncStorage.getStore()
exports.Task = class Task {
static get abortSignal() {
const task = getTask()
if (task !== undefined) {
return task.#abortController.signal
}
}
static info(message, data) {
const task = getTask()
if (task !== undefined) {
task.#emit('info', { data, message })
}
}
static run(opts, fn) {
return new this(opts).run(fn)
}
static set(name, value) {
const task = getTask()
if (task !== undefined) {
task.#emit('property', { name, value })
}
}
static warning(message, data) {
const task = getTask()
if (task !== undefined) {
task.#emit('warning', { data, message })
}
}
static wrap(opts, fn) {
// compatibility with @decorateWith
if (typeof fn !== 'function') {
;[fn, opts] = [opts, fn]
}
return function taskRun() {
return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
}
}
#abortController = new AbortController()
#onProgress
#parent
get id() {
return (this.id = Math.random().toString(36).slice(2))
}
set id(value) {
define(this, 'id', value)
}
#startData
#status = PENDING
get status() {
return this.#status
}
constructor({ name, onProgress }) {
this.#startData = { name }
if (onProgress !== undefined) {
this.#onProgress = onProgress
} else {
const parent = getTask()
if (parent !== undefined) {
this.#parent = parent
const { signal } = parent.#abortController
signal.addEventListener('abort', () => {
this.#abortController.abort(signal.reason)
})
this.#onProgress = parent.#onProgress
this.#startData.parentId = parent.id
} else {
this.#onProgress = noop
}
}
const { signal } = this.#abortController
signal.addEventListener('abort', () => {
if (this.status === PENDING) {
this.#status = this.#running ? ABORTING : ABORTED
}
})
}
abort(reason) {
this.#abortController.abort(reason)
}
#emit(type, data) {
data.id = this.id
data.timestamp = Date.now()
data.type = type
this.#onProgress(data)
}
#handleMaybeAbortion(result) {
if (this.status === ABORTING) {
this.#status = ABORTED
this.#emit('end', { status: ABORTED, result })
return true
}
return false
}
async run(fn) {
const result = await this.runInside(fn)
if (this.status === PENDING) {
this.#status = SUCCESS
this.#emit('end', { status: SUCCESS, result })
}
return result
}
#running = false
async runInside(fn) {
assert.equal(this.status, PENDING)
assert.equal(this.#running, false)
this.#running = true
const startData = this.#startData
if (startData !== undefined) {
this.#startData = undefined
this.#emit('start', startData)
}
try {
const result = await asyncStorage.run(this, fn)
this.#handleMaybeAbortion(result)
this.#running = false
return result
} catch (result) {
if (!this.#handleMaybeAbortion(result)) {
this.#status = FAILURE
this.#emit('end', { status: FAILURE, result })
}
throw result
}
}
wrap(fn) {
const task = this
return function taskRun() {
return task.run(() => fn.apply(this, arguments))
}
}
wrapInside(fn) {
const task = this
return function taskRunInside() {
return task.runInside(() => fn.apply(this, arguments))
}
}
}
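A hedged sketch of how abortion surfaces with the implementation above (cooperative cancellation, names are illustrative):

```js
import { Task } from '@vates/task'

const task = new Task({ name: 'interruptible work', onProgress: console.log })

const promise = task.run(() => {
  const signal = Task.abortSignal
  return new Promise((resolve, reject) => {
    // stop working as soon as the task is aborted
    signal.addEventListener('abort', () => reject(signal.reason))
  })
})

task.abort(new Error('user cancelled'))

await promise.catch(() => {
  console.log(task.status) // 'aborted'
})
```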

View File

@@ -1,23 +0,0 @@
{
"private": false,
"name": "@vates/task",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/task",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/task",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.0.1",
"engines": {
"node": ">=14"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):
```sh
npm install --save @vates/toggle-scripts
```
> npm install --save @vates/toggle-scripts
```
## Usage

View File

@@ -30,7 +30,6 @@ if (args.length === 0) {
${name} v${version}
`)
// eslint-disable-next-line n/no-process-exit
process.exit()
}

View File

@@ -10,8 +10,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async-map):
```sh
npm install --save @xen-orchestra/async-map
```
> npm install --save @xen-orchestra/async-map
```
## Usage

View File

@@ -1,8 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('assert').strict
const sinon = require('sinon')
/* eslint-env jest */
const { asyncMapSettled } = require('./')
@@ -11,29 +9,26 @@ const noop = Function.prototype
describe('asyncMapSettled', () => {
it('works', async () => {
const values = [Math.random(), Math.random()]
const spy = sinon.spy(async v => v * 2)
const spy = jest.fn(async v => v * 2)
const iterable = new Set(values)
// returns an array containing the result of each calls
assert.deepStrictEqual(
await asyncMapSettled(iterable, spy),
values.map(value => value * 2)
)
expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))
for (let i = 0, n = values.length; i < n; ++i) {
// each call receive the current item as sole argument
assert.deepStrictEqual(spy.args[i], [values[i]])
expect(spy.mock.calls[i]).toEqual([values[i]])
// each call as this bind to the iterable
assert.deepStrictEqual(spy.thisValues[i], iterable)
expect(spy.mock.instances[i]).toBe(iterable)
}
})
it('can use a specified thisArg', () => {
const thisArg = {}
const spy = sinon.spy()
const spy = jest.fn()
asyncMapSettled(['foo'], spy, thisArg)
assert.deepStrictEqual(spy.thisValues[0], thisArg)
expect(spy.mock.instances[0]).toBe(thisArg)
})
it('rejects only when all calls as resolved', async () => {
@@ -60,22 +55,19 @@ describe('asyncMapSettled', () => {
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
assert.strictEqual(hasSettled, false)
expect(hasSettled).toBe(false)
defers[1].resolve()
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
assert.strictEqual(hasSettled, true)
await assert.rejects(promise, error)
expect(hasSettled).toBe(true)
await expect(promise).rejects.toBe(error)
})
it('issues when latest promise rejects', async () => {
const error = new Error()
await assert.rejects(
asyncMapSettled([1], () => Promise.reject(error)),
error
)
await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
})
})

View File

@@ -31,11 +31,6 @@
"lodash": "^4.17.4"
},
"scripts": {
"postversion": "npm publish",
"test": "node--test"
},
"devDependencies": {
"sinon": "^15.0.1",
"test": "^3.2.1"
"postversion": "npm publish"
}
}

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/audit-core):
```sh
npm install --save @xen-orchestra/audit-core
```
> npm install --save @xen-orchestra/audit-core
```
## Contributions

View File

@@ -7,7 +7,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.2.3",
"version": "0.2.0",
"engines": {
"node": ">=14"
},
@@ -17,7 +17,7 @@
},
"dependencies": {
"@vates/decorate-with": "^2.0.0",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/log": "^0.3.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
},

View File

@@ -5,6 +5,7 @@ const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
@@ -14,7 +15,7 @@ const configs = {
proposal: 'minimal',
},
'@babel/preset-env': {
debug: __PROD__,
debug: !__TEST__,
// disabled until https://github.com/babel/babel/issues/8323 is resolved
// loose: true,

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups-cli):
```sh
npm install --global @xen-orchestra/backups-cli
```
> npm install --global @xen-orchestra/backups-cli
```
## Usage

View File

@@ -1,10 +1,11 @@
import { readFileSync } from 'fs'
import getopts from 'getopts'
'use strict'
const { version } = JSON.parse(readFileSync(new URL('package.json', import.meta.url)))
const getopts = require('getopts')
export function composeCommands(commands) {
return async function (args, prefix) {
const { version } = require('./package.json')
module.exports = commands =>
async function (args, prefix) {
const opts = getopts(args, {
alias: {
help: 'h',
@@ -29,6 +30,5 @@ xo-backups v${version}
return
}
return (await command.default)(args.slice(1), prefix + ' ' + commandName)
return command.main(args.slice(1), prefix + ' ' + commandName)
}
}

View File

@@ -1,9 +1,11 @@
import fs from 'fs/promises'
import { dirname } from 'path'
'use strict'
export * from 'fs/promises'
const { dirname } = require('path')
export const getSize = path =>
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs
fs.getSize = path =>
fs.stat(path).then(
_ => _.size,
error => {
@@ -14,7 +16,7 @@ export const getSize = path =>
}
)
export async function mktree(path) {
fs.mktree = async function mkdirp(path) {
try {
await fs.mkdir(path)
} catch (error) {
@@ -24,8 +26,8 @@ export async function mktree(path) {
return
}
if (code === 'ENOENT') {
await mktree(dirname(path))
return mktree(path)
await mkdirp(dirname(path))
return mkdirp(path)
}
throw error
}
@@ -35,7 +37,7 @@ export async function mktree(path) {
// - single param for direct use in `Array#map`
// - files are prefixed with directory path
// - safer: returns empty array if path is missing or not a directory
export const readdir2 = path =>
fs.readdir2 = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
@@ -57,7 +59,7 @@ export const readdir2 = path =>
}
)
export async function symlink2(target, path) {
fs.symlink2 = async (target, path) => {
try {
await fs.symlink(target, path)
} catch (error) {
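A hedged usage sketch for the helpers above (the backup directory is hypothetical):

```js
const { getSize, mktree, readdir2 } = require('./_fs')

async function listVhdSizes(backupDir) {
  // recursive mkdir that tolerates already existing directories
  await mktree(`${backupDir}/indexes`)
  // entries come back prefixed with the directory path, or as [] if the path is missing
  const entries = await readdir2(backupDir)
  return Promise.all(entries.filter(entry => entry.endsWith('.vhd')).map(entry => getSize(entry)))
}
```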

View File

@@ -0,0 +1,34 @@
'use strict'
// -----------------------------------------------------------------------------
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const getopts = require('getopts')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
const { resolve } = require('path')
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))
module.exports = async function main(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}
})
}

View File

@@ -1,38 +0,0 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'
import Disposable from 'promise-toolbox/Disposable'
import { pathToFileURL } from 'url'
export default async function cleanVms(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
await asyncMap(_, vmDir =>
Disposable.use(getSyncedHandler({ url: pathToFileURL(dirname(vmDir)).href }), async handler => {
try {
await new RemoteAdapter(handler).cleanVm(basename(vmDir), {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}
})
)
}

View File

@@ -1,10 +1,13 @@
import { mktree, readdir2, readFile, symlink2 } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import filenamify from 'filenamify'
import get from 'lodash/get.js'
import { dirname, join, relative } from 'path'
'use strict'
export default async function createSymlinkIndex([backupDir, fieldPath]) {
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')
const { mktree, readdir2, readFile, symlink2 } = require('../_fs')
module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
const indexDir = join(backupDir, 'indexes', filenamify(fieldPath))
await mktree(indexDir)

View File

@@ -1,13 +1,16 @@
import { readdir2, readFile, getSize } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import { createHash } from 'crypto'
import groupBy from 'lodash/groupBy.js'
import { dirname, resolve } from 'path'
'use strict'
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const { readdir2, readFile, getSize } = require('../_fs')
const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)
export default async function info(vmDirs) {
module.exports = async function info(vmDirs) {
const jsonFiles = (
await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
).flat()

View File

@@ -1,12 +1,11 @@
#!/usr/bin/env node
import { composeCommands } from './_composeCommands.mjs'
const importDefault = async path => (await import(path)).default
'use strict'
composeCommands({
require('./_composeCommands')({
'clean-vms': {
get default() {
return importDefault('./commands/clean-vms.mjs')
get main() {
return require('./commands/clean-vms')
},
usage: `[--fix] [--merge] [--remove] xo-vm-backups/*
@@ -19,14 +18,14 @@ composeCommands({
`,
},
'create-symlink-index': {
get default() {
return importDefault('./commands/create-symlink-index.mjs')
get main() {
return require('./commands/create-symlink-index')
},
usage: 'xo-vm-backups <field path>',
},
info: {
get default() {
return importDefault('./commands/info.mjs')
get main() {
return require('./commands/info')
},
usage: 'xo-vm-backups/*',
},

View File

@@ -1,21 +1,21 @@
{
"private": false,
"bin": {
"xo-backups": "index.mjs"
"xo-backups": "index.js"
},
"preferGlobal": true,
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.32.0",
"@xen-orchestra/fs": "^3.3.2",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/fs": "^1.0.3",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0"
},
"engines": {
"node": ">=14"
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "1.0.2",
"version": "0.7.3",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -3,7 +3,6 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
@@ -12,7 +11,6 @@ const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const createStreamThrottle = require('./_createStreamThrottle.js')
const noop = Function.prototype
@@ -27,7 +25,6 @@ const getAdaptersByRemote = adapters => {
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
const DEFAULT_SETTINGS = {
getRemoteTimeout: 300e3,
reportWhen: 'failure',
}
@@ -41,13 +38,11 @@ const DEFAULT_VM_SETTINGS = {
fullInterval: 0,
healthCheckSr: undefined,
healthCheckVmsWithTags: [],
maxExportRate: 0,
maxMergedDeltasPerRun: Infinity,
maxMergedDeltasPerRun: 2,
offlineBackup: false,
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
useNbd: false,
unconditionalSnapshot: false,
vmTimeout: 0,
}
@@ -57,13 +52,6 @@ const DEFAULT_METADATA_SETTINGS = {
retentionXoMetadata: 0,
}
class RemoteTimeoutError extends Error {
constructor(remoteId) {
super('timeout while getting the remote ' + remoteId)
this.remoteId = remoteId
}
}
exports.Backup = class Backup {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
@@ -71,6 +59,13 @@ exports.Backup = class Backup {
this._job = job
this._schedule = schedule
this._getAdapter = Disposable.factory(function* (remoteId) {
return {
adapter: yield getAdapter(remoteId),
remoteId,
}
})
this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
'{job.name}': job.name,
'{vm.name_label}': vm => vm.name_label,
@@ -91,27 +86,6 @@ exports.Backup = class Backup {
this._baseSettings = baseSettings
this._settings = { ...baseSettings, ...job.settings[schedule.id] }
const { getRemoteTimeout } = this._settings
this._getAdapter = async function (remoteId) {
try {
const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))
return new Disposable(() => disposable.dispose(), {
adapter: disposable.value,
remoteId,
})
} catch (error) {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id: remoteId },
},
() => Promise.reject(error)
)
}
}
}
async _runMetadataBackup() {
@@ -157,7 +131,20 @@ exports.Backup = class Backup {
})
)
),
Disposable.all(remoteIds.map(id => this._getAdapter(id))),
Disposable.all(
remoteIds.map(id =>
this._getAdapter(id).catch(error => {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id },
},
() => Promise.reject(error)
)
})
)
),
async (pools, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
@@ -228,11 +215,9 @@ exports.Backup = class Backup {
// FIXME: proper SimpleIdPattern handling
const getSnapshotNameLabel = this._getSnapshotNameLabel
const schedule = this._schedule
const settings = this._settings
const throttleStream = createStreamThrottle(settings.maxExportRate)
const config = this._config
const settings = this._settings
await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -247,8 +232,20 @@ exports.Backup = class Backup {
})
)
),
Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
Disposable.all(
extractIdsFromSimplePattern(job.remotes).map(id =>
this._getAdapter(id).catch(error => {
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id },
},
() => Promise.reject(error)
)
})
)
),
() => settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined,
async (srs, remoteAdapters, healthCheckSr) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
@@ -282,7 +279,6 @@ exports.Backup = class Backup {
schedule,
settings: { ...settings, ...allSettings[vm.uuid] },
srs,
throttleStream,
vm,
}).run()
)
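The remote adapter handling above relies on `promise-toolbox/timeout` used as `pTimeout.call(promise, delay, error)`; a hedged standalone sketch of that pattern:

```js
const pTimeout = require('promise-toolbox/timeout')

class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}

// rejects with RemoteTimeoutError if the adapter is not acquired within `delay` ms
function getAdapterWithTimeout(getAdapter, remoteId, delay = 300e3) {
  return pTimeout.call(getAdapter(remoteId), delay, new RemoteTimeoutError(remoteId))
}
```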

View File

@@ -8,8 +8,8 @@
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups):
```sh
npm install --save @xen-orchestra/backups
```
> npm install --save @xen-orchestra/backups
```
## Contributions

View File

@@ -10,36 +10,23 @@ const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const {
createVhdDirectoryFromStream,
createVhdStreamWithLength,
openVhd,
VhdAbstract,
VhdDirectory,
VhdSynthetic,
} = require('vhd-lib')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when the lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
const { strictEqual } = require('assert')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
@@ -47,7 +34,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const { warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -57,13 +44,16 @@ const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
files.push({
realPath,
metadataPath,
@@ -87,14 +77,14 @@ const debounceResourceFactory = factory =>
class RemoteAdapter {
constructor(
handler,
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, vhdDirectoryEncryption } = {}
) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._vhdDirectoryEncryption = vhdDirectoryEncryption
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
this._useGetDiskLegacy = useGetDiskLegacy
}
get handler() {
@@ -142,9 +132,7 @@ class RemoteAdapter {
}
async *_getPartition(devicePath, partition) {
// the norecovery option is necessary because if the partition is dirty,
// mount will try to fix it, which is impossible because the device is read-only
const options = ['loop', 'ro', 'norecovery']
const options = ['loop', 'ro']
if (partition !== undefined) {
const { size, start } = partition
@@ -217,8 +205,10 @@ class RemoteAdapter {
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.useVhdDirectory()
? this.#useVhdDirectory() &&
this.#getCompressionType() === vhd.compressionType &&
this.#getEncryption() === vhd.encryption
: !this.#useVhdDirectory()
})
}
@@ -241,32 +231,11 @@ class RemoteAdapter {
return promise
}
async #removeVmBackupsFromCache(backups) {
await asyncEach(
Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
),
([dir, filenames]) =>
// will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
)
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
await this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -294,8 +263,6 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
await this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -316,29 +283,33 @@ class RemoteAdapter {
full !== undefined && this.deleteFullVmBackups(full),
])
await asyncMap(new Set(files.map(file => dirname(file))), dir =>
// - don't merge in main process, unused VHDs will be merged in the next backup run
// - don't error in case this fails:
// - if lock is already being held, a backup is running and cleanVm will be run at the end
// - otherwise, there is nothing more we can do, orphan file will be cleaned in the future
this.cleanVm(dir, { remove: true, logWarn: warn }).catch(noop)
)
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
}
#getCompressionType() {
return this._vhdDirectoryCompression
}
useVhdDirectory() {
return this.handler.useVhdDirectory()
#getEncryption() {
return this._vhdDirectoryEncryption
}
#useVhdDirectory() {
return this.handler.type === 's3'
}
#useAlias() {
return this.useVhdDirectory()
return this.#useVhdDirectory()
}
async *#getDiskLegacy(diskId) {
const RE_VHDI = /^vhdi(\d+)$/
async *getDisk(diskId) {
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
@@ -368,20 +339,6 @@ class RemoteAdapter {
}
}
async *getDisk(diskId) {
if (this._useGetDiskLegacy) {
yield* this.#getDiskLegacy(diskId)
return
}
const handler = this._handler
// this is a disposable
const mountDir = yield getTmpDir()
// this is also a disposable
yield mount(handler, diskId, mountDir)
// this will yield disk path to caller
yield `${mountDir}/vhd0`
}
// partitionId values:
//
// - undefined: raw disk
@@ -432,25 +389,18 @@ class RemoteAdapter {
listPartitionFiles(diskId, partitionId, path) {
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncEach(
await readdir(path),
async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
await asyncMap(await readdir(path), async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
},
{ concurrency: 1 }
)
}
})
return entriesMap
})
@@ -515,42 +465,11 @@ class RemoteAdapter {
return backupsByPool
}
#getVmBackupsCache(vmUuid) {
return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
async _readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
if (error.code !== 'ENOENT') {
warn('#readCache', { error, path })
}
}
}
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
const cache = await this._readCache(path)
if (cache !== undefined) {
fn(cache)
await this._writeCache(path, cache)
}
}
async _writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
warn('#writeCache', { error, path })
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
const handler = this._handler
const backups = {}
@@ -586,26 +505,41 @@ class RemoteAdapter {
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const path = this.#getVmBackupsCache(vmUuid)
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
const cache = await this._readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
const backups = await this.#getCachabledDataListVmBackups(dir)
if (backups === undefined) {
return
}
// detached async action, will not reject
this._writeCache(path, backups)
this.#writeVmBackupsCache(path, backups)
return backups
}
async #writeVmBackupsCache(cacheFile, backups) {
try {
const text = JSON.stringify(backups)
const zipped = await fromCallback(zlib.gzip, text)
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
} catch (error) {
warn('writeVmBackupsCache', { cacheFile, error })
}
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
@@ -644,79 +578,35 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVmBackupMetadata(vmUuid, metadata) {
const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
await this.handler.outputFile(path, JSON.stringify(metadata), {
dirMode: this._dirMode,
})
// will not throw
await this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,
// these values are required in the cache
_filename: path,
id: path,
}
})
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (this.useVhdDirectory()) {
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16,
compression: this.#getCompressionType(),
encryption: this.#getEncryption(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
return size
} else {
const inputWithSize = await createVhdStreamWithLength(input)
return this.outputStream(path, inputWithSize, { checksum, validator, expectedSize: inputWithSize.length })
await this.outputStream(path, input, { checksum, validator })
}
}
async outputStream(path, input, { checksum = true, validator = noop, expectedSize } = {}) {
const container = watchStreamSize(input)
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
await this._handler.outputStream(path, input, {
checksum,
dirMode: this._dirMode,
async validator() {
await input.task
if (expectedSize !== undefined) {
// check that we read all the stream
strictEqual(
container.size,
expectedSize,
`transferred size ${container.size}, expected file size: ${expectedSize}`
)
}
let size
try {
size = await this._handler.getSize(path)
} catch (err) {
// can fail if the remote is encrypted
}
if (size !== undefined) {
// check that everything is written to disk
strictEqual(size, container.size, `written size ${size}, transferred size: ${container.size}`)
}
return validator.apply(this, arguments)
},
})
return container.size
}
// open the hierarchy of ancestors until we find a full one
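`watchStreamSize` is imported but not shown in this diff; a plausible minimal version, given how the returned container's `size` is compared to the expected and written sizes above (an assumption, not necessarily the actual implementation):

```js
// counts the bytes flowing through a stream without consuming it
function watchStreamSize(stream) {
  const container = { size: 0 }
  stream.on('data', chunk => {
    container.size += chunk.length
  })
  return container
}
```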

View File

@@ -3,10 +3,8 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
const logAfterEnd = () => {
throw new Error('task has already ended')
}
const noop = Function.prototype
@@ -100,7 +98,7 @@ class Task {
* In case of error, the task will be failed.
*
* @typedef Result
* @param {() => Result} fn
* @param {() => Result)} fn
* @param {boolean} last - Whether the task should succeed if there is no error
* @returns Result
*/

View File

@@ -55,7 +55,6 @@ class VmBackup {
schedule,
settings,
srs,
throttleStream,
vm,
}) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
@@ -83,7 +82,6 @@ class VmBackup {
this._healthCheckSr = healthCheckSr
this._jobId = job.id
this._jobSnapshots = undefined
this._throttleStream = throttleStream
this._xapi = vm.$xapi
// Base VM for the export
@@ -130,49 +128,42 @@ class VmBackup {
}
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, step, parallel = true) {
async _callWriters(fn, warnMessage, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { duration: step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only ones that are not already in their own subtasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
try {
await fn(writer)
} catch (error) {
writers.delete(writer)
throw error
}
return
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await callWriter(writer)
await fn(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
// these two steps are the only ones that are not already in their own subtasks
if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
Task.warning(
`the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
}
}
@@ -246,7 +237,6 @@ class VmBackup {
fullVdisRequired,
})
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)
const timestamp = Date.now()
@@ -288,12 +278,10 @@ class VmBackup {
async _copyFull() {
const { compression } = this.job
const stream = this._throttleStream(
await this._xapi.VM_export(this.exportedVm.$ref, {
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
)
const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
const sizeContainer = watchStreamSize(stream)
const timestamp = Date.now()

View File

@@ -1,8 +1,8 @@
'use strict'
const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')
require('@xen-orchestra/log/configure').catchGlobalErrors(logger)
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
require('@xen-orchestra/log').createLogger('xo:backups:worker')
)
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()
@@ -20,8 +20,6 @@ const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')
const { debug } = logger
class BackupWorker {
#config
#job
@@ -73,6 +71,7 @@ class BackupWorker {
debounceResource: this.debounceResource,
dirMode: this.#config.dirMode,
vhdDirectoryCompression: this.#config.vhdDirectoryCompression,
vhdDirectoryEncryption: this.#config.vhdDirectoryEncryption,
})
} finally {
await handler.forget()
@@ -124,11 +123,6 @@ decorateMethodsWith(BackupWorker, {
]),
})
const emitMessage = message => {
debug('message emitted', { message })
process.send(message)
}
// Received message:
//
// Message {
@@ -146,8 +140,6 @@ const emitMessage = message => {
// result?: any
// }
process.on('message', async message => {
debug('message received', { message })
if (message.action === 'run') {
const backupWorker = new BackupWorker(message.data)
try {
@@ -156,7 +148,7 @@ process.on('message', async message => {
{
name: 'backup run',
onLog: data =>
emitMessage({
process.send({
data,
type: 'log',
}),
@@ -165,13 +157,13 @@ process.on('message', async message => {
)
: await backupWorker.run()
emitMessage({
process.send({
type: 'result',
result,
status: 'success',
})
} catch (error) {
emitMessage({
process.send({
type: 'result',
result: error,
status: 'failure',

View File

@@ -1,7 +1,6 @@
'use strict'
const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict
/* eslint-env jest */
const rimraf = require('rimraf')
const tmp = require('tmp')
@@ -15,8 +14,9 @@ const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
let tempDir, adapter, handler, jobId, vdiId, basePath
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
@@ -25,18 +25,17 @@ beforeEach(async () => {
adapter = new RemoteAdapter(handler)
jobId = uniqueId()
vdiId = uniqueId()
relativePath = `vdis/${jobId}/${vdiId}`
basePath = `${rootPath}/${relativePath}`
basePath = `vdis/${jobId}/${vdiId}`
await fs.mkdirp(`${tempDir}/${basePath}`)
})
afterEach(async () => {
await rimraf(tempDir)
await pFromCallback(cb => rimraf(tempDir, cb))
await handler.forget()
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
async function generateVhd(path, opts = {}) {
let vhd
@@ -77,18 +76,18 @@ test('It remove broken vhd', async () => {
// todo: also test a directory and an alias
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
assert.equal((await handler.list(basePath)).length, 1)
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message
}
await adapter.cleanVm(rootPath, { remove: false, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued, `VHD check error`)
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
// not removed
assert.deepEqual(await handler.list(basePath), ['notReallyAVhd.vhd'])
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
assert.deepEqual(await handler.list(basePath), [])
await adapter.cleanVm('/', { remove: true, onLog })
expect((await handler.list(basePath)).length).toEqual(0)
})
test('it remove vhd with missing or multiple ancestors', async () => {
@@ -119,13 +118,15 @@ test('it remove vhd with missing or multiple ancestors', async () => {
)
// clean
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm('/', { remove: true, onLog })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
assert.equal(deletedOrphanVhd.length, 1) // only one vhd should have been deleted
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
@@ -133,12 +134,12 @@ test('it remove vhd with missing or multiple ancestors', async () => {
test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
// create a metadata file marking child and orphan as ok
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${relativePath}/orphan.vhd`,
`${relativePath}/child.vhd`,
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here
],
})
@@ -158,42 +159,44 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
})
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
assert.equal(matched.length, 1) // only one vhd should have been deleted
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
// a missing vhd cause clean to remove all vhds
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`deleted.vhd`, // in metadata but not in vhds
`orphan.vhd`,
`child.vhd`,
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
assert.equal(matched.length, 2) // all vhds (orphan and child ) should have been deleted
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
test('it merges delta of non destroyed chain', async () => {
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 12000, // a size too small
vhds: [
`${relativePath}/grandchild.vhd`, // grand child should not be merged
`${relativePath}/child.vhd`,
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
// orphan is not here, he should be merged in child
],
})
@@ -217,36 +220,36 @@ test('it merges delta of non destroyed chain', async () => {
})
let loggued = []
const logInfo = message => {
const onLog = message => {
loggued.push(message)
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued[0], `unexpected number of entries in backup cache`)
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
loggued = []
await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [merging] = loggued
assert.equal(merging, `merging VHD chain`)
expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
assert.equal(metadata.size, 209920)
expect(metadata.size).toEqual(209920)
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
// only check deletion
const remainingVhds = await handler.list(basePath)
assert.equal(remainingVhds.length, 2)
assert.equal(remainingVhds.includes('child.vhd'), true)
assert.equal(remainingVhds.includes('grandchild.vhd'), true)
expect(remainingVhds.length).toEqual(2)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
})
test('it finish unterminated merge ', async () => {
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [`${relativePath}/orphan.vhd`, `${relativePath}/child.vhd`],
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
})
)
@@ -272,13 +275,13 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
// only check deletion
const remainingVhds = await handler.list(basePath)
assert.equal(remainingVhds.length, 1)
assert.equal(remainingVhds.includes('child.vhd'), true)
expect(remainingVhds.length).toEqual(1)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
})
// each of the vhd can be a file, a directory, an alias to a file or an alias to a directory
@@ -368,34 +371,22 @@ describe('tests multiple combination ', () => {
// the metadata file
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${relativePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${relativePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${relativePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
],
})
)
if (!useAlias && vhdMode === 'directory') {
try {
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
} catch (err) {
assert.strictEqual(
err.code,
'NOT_SUPPORTED',
'Merging directory without alias should raise a not supported error'
)
return
}
assert.strictEqual(true, false, 'Merging directory without alias should raise an error')
}
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
await adapter.cleanVm('/', { remove: true, merge: true })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
// broken vhd, non referenced, abandonned should be deleted ( alias and data)
// ancestor and child should be merged
@@ -405,19 +396,19 @@ describe('tests multiple combination ', () => {
if (useAlias) {
const dataSurvivors = await handler.list(basePath + '/data')
// the goal of the alias : do not move a full folder
assert.equal(dataSurvivors.includes('ancestor.vhd'), true)
assert.equal(dataSurvivors.includes('grandchild.vhd'), true)
assert.equal(dataSurvivors.includes('cleanAncestor.vhd'), true)
assert.equal(survivors.includes('clean.vhd.alias.vhd'), true)
assert.equal(survivors.includes('child.vhd.alias.vhd'), true)
assert.equal(survivors.includes('grandchild.vhd.alias.vhd'), true)
assert.equal(survivors.length, 4) // the 3 ok + data
assert.equal(dataSurvivors.length, 3)
expect(dataSurvivors).toContain('ancestor.vhd')
expect(dataSurvivors).toContain('grandchild.vhd')
expect(dataSurvivors).toContain('cleanAncestor.vhd')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(4) // the 3 ok + data
expect(dataSurvivors.length).toEqual(3) // the 3 ok + data
} else {
assert.equal(survivors.includes('clean.vhd'), true)
assert.equal(survivors.includes('child.vhd'), true)
assert.equal(survivors.includes('grandchild.vhd'), true)
assert.equal(survivors.length, 3)
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
expect(survivors).toContain('grandchild.vhd')
expect(survivors.length).toEqual(3)
}
})
}
@@ -427,9 +418,9 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm(rootPath, { remove: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true })
assert.deepEqual(await handler.list(basePath), [])
expect(await handler.list(basePath)).toEqual([])
})
test('check Aliases should work alone', async () => {
@@ -442,16 +433,12 @@ test('check Aliases should work alone', async () => {
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
// only ok has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
assert.equal(alias.length, 1)
expect(alias.length).toEqual(1)
const data = await handler.list('vhds/data')
assert.equal(data.length, 1)
expect(data.length).toEqual(1)
})

View File

@@ -1,27 +1,22 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// checking the size of a vhd directory is costly
// 1 HTTP query per 1000 blocks
// we only check the size if all the VHDs are VhdFiles
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
function shouldComputeVhdsSize(vhds) {
return vhds.every(vhd => vhd instanceof VhdFile)
}
@@ -29,49 +24,73 @@ const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(handler, vhds)) {
if (shouldComputeVhdsSize(vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
// chain is [ ancestor, child1, ..., childn ]
// 1. Create a VhdSynthetic from all children
// 2. Merge the VhdSynthetic into the ancestor
// 3. Delete all (now) unused VHDs
// 4. Rename the ancestor with the merged data to the latest child
//
// VhdSynthetic
// |
// /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, childn ]
// | \___________________/ ^
// | | |
// | unused VHDs |
// | |
// \___________rename_____________/
async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
assert(chain.length >= 2)
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
if (merge) {
logInfo(`merging VHD chain`, { chain })
logInfo(`merging children into parent`, { childrenCount: children.length, parent })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total })
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
const mergedSize = await mergeVhd(handler, parent, handler, children, {
onProgress({ done: d, total: t }) {
done = d
total = t
},
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
logInfo(`the VHD child is already merged`, { child })
if (remove) {
logInfo(`deleting merged VHD child`, { child })
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
}
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir, logWarn) => {
const listVhds = async (handler, vmDir) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
@@ -91,23 +110,12 @@ const listVhds = async (handler, vmDir, logWarn) => {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
await asyncMap(list, async file => {
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
} else {
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non-resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
}
})
}
@@ -123,15 +131,15 @@ async function checkAliases(
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
logWarn('alias references non VHD target', { path, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
logInfo('removing alias and non VHD target', { path, target })
await handler.unlink(target)
await handler.unlink(alias)
await handler.unlink(path)
}
continue
}
@@ -144,13 +152,13 @@ async function checkAliases(
// error during dispose should not trigger a deletion
}
} catch (error) {
logWarn('missing or broken alias target', { alias, target, error })
logWarn('missing or broken alias target', { target, path, error })
if (remove) {
try {
await VhdAbstract.unlink(handler, alias)
await VhdAbstract.unlink(handler, path)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { alias, target, error })
logWarn('error deleting alias target', { target, path, error })
}
}
}
@@ -160,17 +168,17 @@ async function checkAliases(
aliasFound.push(resolve('/', target))
}
const vhds = await handler.list(targetDataRepository, {
const entries = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
logWarn('no alias references VHD', { entry })
if (remove) {
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
logInfo('deleting unaliased VHD')
await VhdAbstract.unlink(handler, entry)
}
}
})
@@ -182,26 +190,17 @@ const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhds, async path => {
@@ -219,31 +218,12 @@ exports.cleanVm = async function cleanVm(
}
vhdChildren[parent] = path
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
})
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
if (error?.code === 'ERR_ASSERTION' && remove) {
logInfo('deleting broken VHD', { path })
logInfo('deleting broken path', { path })
return VhdAbstract.unlink(handler, path)
}
}
@@ -252,7 +232,7 @@ exports.cleanVm = async function cleanVm(
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const { statePath } = interruptedVhds.get(interruptedVhd)
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
logWarn('orphan merge state', {
@@ -291,9 +271,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
logWarn('parent VHD is missing', { parent, child: vhdPath })
logWarn('parent VHD is missing', { parent, vhdPath })
if (remove) {
logInfo('deleting orphan VHD', { path: vhdPath })
logInfo('deleting orphan VHD', { vhdPath })
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -326,20 +306,6 @@ exports.cleanVm = async function cleanVm(
}
})
const cachePath = vmDir + '/cache.json.gz'
let mustRegenerateCache
{
const cache = await this._readCache(cachePath)
const actual = cache === undefined ? 0 : Object.keys(cache).length
const expected = jsons.size
mustRegenerateCache = actual !== expected
if (mustRegenerateCache) {
logWarn('unexpected number of entries in backup cache', { path: cachePath, actual, expected })
}
}
await asyncMap(xvas, async path => {
// the check is not reliable enough to justify deleting the file; the best we
// can do is report it
@@ -351,8 +317,6 @@ exports.cleanVm = async function cleanVm(
const unusedVhds = new Set(vhds)
const unusedXvas = new Set(xvas)
const backups = new Map()
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
@@ -360,21 +324,23 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
logWarn('failed to read backup metadata', { path: json, error })
logWarn('failed to read metadata file', { json, error })
jsons.delete(json)
return
}
let isBackupComplete
const { mode } = metadata
if (mode === 'full') {
const linkedXva = resolve('/', vmDir, metadata.xva)
isBackupComplete = xvas.has(linkedXva)
if (isBackupComplete) {
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
logWarn('metadata XVA is missing', { json })
if (remove) {
logInfo('deleting incomplete backup', { json })
jsons.delete(json)
await handler.unlink(json)
}
}
} else if (mode === 'delta') {
const linkedVhds = (() => {
@@ -383,28 +349,21 @@ exports.cleanVm = async function cleanVm(
})()
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
isBackupComplete = missingVhds.length === 0
// FIXME: find a better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (isBackupComplete) {
if (missingVhds.length === 0) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
linkedVhds.forEach(path => {
vhdsToJSons[path] = json
})
} else {
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
}
}
if (isBackupComplete) {
backups.set(json, metadata)
} else {
jsons.delete(json)
if (remove) {
logInfo('deleting incomplete backup', { backup: json })
mustRegenerateCache = true
await handler.unlink(json)
logWarn('some metadata VHDs are missing', { json, missingVhds })
if (remove) {
logInfo('deleting incomplete backup', { json })
jsons.delete(json)
await handler.unlink(json)
}
}
}
})
@@ -413,7 +372,7 @@ exports.cleanVm = async function cleanVm(
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -437,14 +396,14 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.unshift(vhd)
chain.push(vhd)
return chain
}
}
logWarn('unused VHD', { path: vhd })
logWarn('unused VHD', { vhd })
if (remove) {
logInfo('deleting unused VHD', { path: vhd })
logInfo('deleting unused VHD', { vhd })
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -455,13 +414,7 @@ exports.cleanVm = async function cleanVm(
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
}
Object.values(vhdChainsToMerge).forEach(chain => {
@@ -474,15 +427,9 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should share the same metadata file
const metadataPath = vhdsToJSons[chain[0]] // the whole chain should share the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
@@ -514,7 +461,7 @@ exports.cleanVm = async function cleanVm(
// check for the other that the size is the same as the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = backups.get(metadataPath)
const metadata = JSON.parse(await handler.readFile(metadataPath))
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
@@ -525,11 +472,7 @@ exports.cleanVm = async function cleanVm(
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
fileSystemSize = await handler.getSize(linkedXva)
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -541,43 +484,25 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
console.warn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
}
}
} catch (error) {
logWarn('failed to get backup size', { backup: metadataPath, error })
logWarn('failed to get metadata size', { metadataPath, error })
return
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
mustRegenerateCache = true
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
logWarn('metadata size update failed', { metadataPath, error })
}
}
})
if (mustRegenerateCache) {
const cache = {}
for (const [path, content] of backups.entries()) {
cache[path] = {
_filename: path,
id: path,
...content,
}
}
await this._writeCache(cachePath, cache)
}
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

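For orientation, here is a minimal sketch of how `cleanVm` is typically called, mirroring the options exercised by the tests above and the `clean-vms` CLI further down. `adapter` is assumed to be an already-constructed remote adapter instance (the object `cleanVm` is attached to) and the VM directory is a placeholder, not a value from this diff.

```js
'use strict'

// Sketch only: `adapter` is assumed to be a remote adapter instance; the VM
// directory below is a placeholder.
async function cleanOneVm(adapter) {
  const { merge } = await adapter.cleanVm('/xo-vm-backups/<VM UUID>', {
    remove: true, // delete broken/orphan/unused files instead of only reporting them
    merge: true, // actually merge interrupted or unused VHD chains
    logInfo: console.log,
    logWarn: console.warn,
  })
  return merge // true when some VHD chains were merged (or should be)
}
```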
View File

@@ -1,17 +0,0 @@
'use strict'
const { pipeline } = require('node:stream')
const { ThrottleGroup } = require('@kldzj/stream-throttle')
const identity = require('lodash/identity.js')
const noop = Function.prototype
module.exports = function createStreamThrottle(rate) {
if (rate === 0) {
return identity
}
const group = new ThrottleGroup({ rate })
return function throttleStream(stream) {
return pipeline(stream, group.createThrottle(), noop)
}
}
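For context, a hedged sketch of how such a throttle helper might be used to cap a backup transfer. The require path, the rate and the file names are illustrative assumptions, not values from this repository.

```js
'use strict'

// Sketch only: throttle a local copy to roughly 1 MiB/s.
const { createReadStream, createWriteStream } = require('node:fs')
const { pipeline } = require('node:stream')
const createStreamThrottle = require('./createStreamThrottle.js') // hypothetical path

const throttleStream = createStreamThrottle(1024 * 1024) // all throttled streams share this rate

pipeline(throttleStream(createReadStream('source.xva')), createWriteStream('copy.xva'), error => {
  if (error != null) console.warn('throttled copy failed', error)
})
```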

View File

@@ -1,18 +1,18 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')
const { cancelableMap } = require('./_cancelableMap.js')
const { Task } = require('./Task.js')
const pick = require('lodash/pick.js')
const { pick } = require('lodash')
const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
@@ -258,9 +258,6 @@ exports.importDeltaVm = defer(async function importDeltaVm(
$defer.onFailure(() => newVdi.$destroy())
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
if (vdi.virtual_size > newVdi.virtual_size) {
await newVdi.$callAsync('resize', vdi.virtual_size)
}
} else if (vdiRef === vmRecord.suspend_VDI) {
// the suspend VDI has already been created
newVdi = suspendVdi

View File

@@ -1,36 +1,30 @@
'use strict'
const { finished, PassThrough } = require('node:stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
const { forks = 0 } = source
source.forks = forks + 1
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: source.forks })
const fork = new PassThrough()
source.pipe(fork)
finished(source, { writable: false }, error => {
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
fork.destroy(error)
proxy.destroy(error)
}
})
finished(fork, { readable: false }, error => {
debug('end of stream, unpiping', { error, forks: --source.forks })
eos(proxy, _ => {
stream.forks--
stream.unpipe(proxy)
source.unpipe(fork)
if (source.forks === 0) {
debug('no more forks, destroying original stream')
source.destroy(new Error('no more consumers for this stream'))
if (stream.forks === 0) {
stream.destroy(new Error('no more consumers for this stream'))
}
})
return fork
return proxy
}
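To make the comments above concrete, a hedged sketch of fanning one export stream out to two consumers. The require path and the destination files are illustrative assumptions.

```js
'use strict'

// Sketch only: send a single readable stream to two independent destinations.
const { createReadStream, createWriteStream } = require('node:fs')
const { pipeline } = require('node:stream')
const { forkStreamUnpipe } = require('./forkStreamUnpipe.js') // hypothetical path

const source = createReadStream('export.xva')

// each fork may fail on its own without killing the source; once every fork
// has ended, the source itself is destroyed
pipeline(forkStreamUnpipe(source), createWriteStream('copy-a.xva'), () => {})
pipeline(forkStreamUnpipe(source), createWriteStream('copy-b.xva'), () => {})
```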

View File

@@ -49,11 +49,6 @@ const isValidTar = async (handler, size, fd) => {
// TODO: find a heuristic for compressed files
async function isValidXva(path) {
const handler = this._handler
// the size is larger when encrypted, and reading part of an encrypted file is not implemented
if (handler.isEncrypted) {
return true
}
try {
const fd = await handler.openFile(path, 'r')
try {
@@ -71,6 +66,7 @@ async function isValidXva(path) {
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}

View File

@@ -14,14 +14,12 @@
## File structure on remote
### with vhd files
```
<remote>
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ cache.json.gz
├─ index.json // TODO
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
@@ -32,31 +30,6 @@
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
### with vhd directories
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different structure:
```
<vdis>/<job UUID>/<VDI UUID>
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
└─ data
├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
└─ <uuid>.vhd
```
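As an illustration of the alias indirection described above: each `.alias.vhd` file is a small text file whose content is the relative path of the actual VHD directory. The sketch below reuses the `resolveVhdAlias` helper already imported by `cleanVm`; the handler and path are placeholders.

```js
'use strict'

// Sketch only: follow an alias to its VHD directory.
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')

async function resolveBackupDisk(handler, path) {
  if (isVhdAlias(path)) {
    // e.g. returns `<vdis>/<job UUID>/<VDI UUID>/data/<uuid>.vhd`
    return resolveVhdAlias(handler, path)
  }
  // plain VHD file, nothing to resolve
  return path
}
```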
## Cache for a VM
In a VM directory, the optional file `xo-vm-backups/<VM UUID>/cache.json.gz` contains the metadata of all the backups of this VM.
This cache is gzip-compressed and contains a JSON object with the metadata of all the backups of this VM, indexed by their absolute path (i.e. `/xo-vm-backups/<VM UUID>/<timestamp>.json`).
This file is generated on demand when listing the backups, and updated directly on backup creation/deletion.
If any inconsistency is detected, the file is deleted so that it is fully regenerated when required.
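A minimal sketch of reading this cache, assuming only the gzip compression and the indexing described above; the helper name is hypothetical and this is not the adapter's actual implementation.

```js
'use strict'

// Sketch only: read `cache.json.gz` if present; callers fall back to listing
// the `*.json` metadata files (and regenerating the cache) when it is missing.
const { gunzipSync } = require('node:zlib')

async function readVmBackupCache(handler, vmDir) {
  try {
    const compressed = await handler.readFile(`${vmDir}/cache.json.gz`)
    // object indexed by absolute metadata path,
    // e.g. `/xo-vm-backups/<VM UUID>/<timestamp>.json`
    return JSON.parse(gunzipSync(compressed))
  } catch (error) {
    return undefined
  }
}
```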
## Attributes
### Of created snapshots

View File

@@ -4,7 +4,7 @@
'use strict'
const { catchGlobalErrors } = require('@xen-orchestra/log/configure')
const { catchGlobalErrors } = require('@xen-orchestra/log/configure.js')
const { createLogger } = require('@xen-orchestra/log')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { join } = require('path')
@@ -64,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {

View File

@@ -8,51 +8,45 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.32.0",
"version": "0.25.0",
"engines": {
"node": ">=14.6"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"postversion": "npm publish --access public"
},
"dependencies": {
"@kldzj/stream-throttle": "^1.1.1",
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.4",
"@vates/fuse-vhd": "^1.0.0",
"@vates/nbd-client": "^1.0.1",
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.3.2",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^5.0.1",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"fs-extra": "^11.1.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
"golike-defer": "^0.5.1",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^2.0.0",
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^9.0.0",
"vhd-lib": "^4.2.1",
"uuid": "^8.3.2",
"vhd-lib": "^3.2.0",
"yazl": "^2.5.1"
},
"devDependencies": {
"rimraf": "^4.1.1",
"sinon": "^15.0.1",
"test": "^3.2.1",
"rimraf": "^3.0.2",
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^2.0.0"
"@xen-orchestra/xapi": "^1.2.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -12,7 +12,7 @@ exports.runBackupWorker = function runBackupWorker(params, onLog) {
return new Promise((resolve, reject) => {
const worker = fork(PATH)
worker.on('exit', (code, signal) => reject(new Error(`worker exited with code ${code} and signal ${signal}`)))
worker.on('exit', code => reject(new Error(`worker exited with code ${code}`)))
worker.on('error', reject)
worker.on('message', message => {

View File

@@ -7,12 +7,11 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { dirname } = require('path')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { Task } = require('../Task.js')
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
@@ -20,24 +19,25 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const NbdClient = require('@vates/nbd-client')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { debug, warn, info } = createLogger('xo:backups:DeltaBackupWriter')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
async checkBaseVdis(baseUuidToSrcVdi) {
const { handler } = this._adapter
const backup = this._backup
const adapter = this._adapter
const vdisDir = `${this._vmBackupDir}/vdis/${backup.job.id}`
const backupDir = getVmBackupDir(backup.vm.uuid)
const vdisDir = `${backupDir}/vdis/${backup.job.id}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
@@ -71,6 +71,35 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
return this._cleanVm({ merge: true })
}
healthCheck(sr) {
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -135,7 +164,7 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
}
}
async _transfer($defer, { timestamp, deltaExport }) {
async _transfer({ timestamp, deltaExport, sizeContainers }) {
const adapter = this._adapter
const backup = this._backup
@@ -143,6 +172,7 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
const jobId = job.id
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -159,6 +189,7 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataContent = {
jobId,
mode: job.mode,
@@ -174,10 +205,9 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
}
const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await Promise.all(
map(deltaExport.vdis, async (vdi, id) => {
const path = `${this._vmBackupDir}/${vhds[id]}`
const path = `${backupDir}/${vhds[id]}`
const isDelta = vdi.other_config['xo:base_delta'] !== undefined
let parentPath
@@ -200,41 +230,11 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
await checkVhd(handler, parentPath)
}
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
if (this._backup.config.useNbd && adapter.useVhdDirectory()) {
debug('useNbd is enabled', { vdi: id, path })
// get nbd if possible
try {
// this will always take the first host in the list
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
debug('got NBD info', { nbdInfo, vdi: id, path })
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
// this will inform the xapi that we don't need this anymore
// and will detach the vdi from dom0
$defer(() => nbdClient.disconnect())
info('NBD client ready', { vdi: id, path })
Task.info('NBD used')
} catch (error) {
Task.warning('NBD configured but unusable', { error })
nbdClient = undefined
warn('error connecting to NBD server', { error, vdi: id, path })
}
} else {
debug('useNbd is disabled', { vdi: id, path })
}
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
@@ -249,14 +249,15 @@ class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
})
})
)
return { size: transferSize }
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
metadataContent.size = size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
// TODO: run cleanup?
}
}
exports.DeltaBackupWriter = decorateClass(DeltaBackupWriter, {
_transfer: defer,
})
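The removed hunk above shows the NBD calls used during transfer; here is a hedged sketch of that sequence in isolation. The `xapi` client, the VDI reference and the helper name are placeholders provided by the caller.

```js
'use strict'

// Sketch only: open an NBD connection to a VDI using the calls shown in the
// hunk above; `xapi` and `vdiRef` are placeholders.
const NbdClient = require('@vates/nbd-client')

async function withNbdClient(xapi, vdiRef, fn) {
  // always takes the first host advertised by XAPI, as in the code above
  const [nbdInfo] = await xapi.call('VDI.get_nbd_info', vdiRef)
  const client = new NbdClient(nbdInfo)
  await client.connect()
  try {
    return await fn(client)
  } finally {
    // tells XAPI the export is no longer needed and detaches the VDI from dom0
    await client.disconnect()
  }
}
```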

View File

@@ -80,7 +80,6 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
}
async _transfer({ timestamp, deltaExport, sizeContainers }) {
const { _warmMigration } = this._settings
const sr = this._sr
const { job, scheduleId, vm } = this._backup
@@ -93,7 +92,7 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
__proto__: deltaExport,
vm: {
...deltaExport.vm,
tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
tags: [...deltaExport.vm.tags, 'Continuous Replication'],
},
},
sr
@@ -102,13 +101,11 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
this._targetVmRef = targetVmRef
const targetVm = await xapi.getRecord('VM', targetVmRef)
await Promise.all([
// warm migration does not disable HA, since the goal is to start the new VM in production
!_warmMigration &&
targetVm.ha_restart_priority !== '' &&
targetVm.ha_restart_priority !== '' &&
Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
asyncMap(['start', 'start_on'], op =>

View File

@@ -2,6 +2,7 @@
const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { Task } = require('../Task.js')
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
@@ -33,6 +34,8 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -45,8 +48,9 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const basename = formatFilenameDate(timestamp)
const dataBasename = basename + '.xva'
const dataFilename = this._vmBackupDir + '/' + dataBasename
const dataFilename = backupDir + '/' + dataBasename
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,
@@ -70,7 +74,9 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
if (!deleteFirst) {
await deleteOldBackups()

Some files were not shown because too many files have changed in this diff.