Compare commits

..

55 Commits

Author SHA1 Message Date
Mathieu
ae087a6539 feat(xo-lite/ObjectStatus): use ProgressCircle component (#6207) 2022-04-28 09:59:59 +02:00
Mathieu
4db93f8ced feat(lite/ProgressCircle): creation of the component (#6128) 2022-04-26 17:18:41 +02:00
Rajaa.BARHTAOUI
e5c737cba7 feat(lite/pool): dashboard Status card (#6112) 2022-04-21 16:22:49 +02:00
Rajaa.BARHTAOUI
9f0f38ef94 feat(lite): add Tabs component (#6096) 2022-04-08 10:13:26 +02:00
Rajaa.BARHTAOUI
d76996b1d5 feat(lite/tree): auto-reveal active VM (#6088) 2022-04-07 15:43:48 +02:00
Rajaa.BARHTAOUI
3b77897692 feat(lite): update PanelHeader to match mockup (#6111) 2022-03-23 16:57:10 +01:00
Mathieu
d4ed555abd feat(lite/console): handle case where host sends self-signed certificate (#6104) 2022-02-18 15:36:18 +01:00
Mathieu
97d77c0aa5 fix(lite/Select): fix controlled input with undefined value (#6106)
Fix `MUI: The 'value' prop must be an array when using the 'Select' component with 'multiple'.` when toggling the `multiple` prop `false -> true` on a controlled input initialized with an undefined value.
2022-02-17 15:45:17 +01:00
Rajaa.BARHTAOUI
a9ad0ec455 feat(lite): change pool icon (#6110) 2022-02-03 09:34:25 +01:00
Rajaa.BARHTAOUI
78ec008c26 feat(lite/Icon): possibility to use colors of theme's palette (#6114) 2022-02-02 14:35:56 +01:00
Pierre Donias
2d71bef5d8 chore(lite): handle asset relative paths in prod and dev environment (#6109)
- Make asset URLs relative
- Add a base tag in production to make all the URLs relative to
  lite.xen-orchestra.com (change made directly on the server)
2022-01-31 11:18:12 +01:00
Mathieu
3ec7c61987 fix(lite/tree): navigate with keyboard into tree-view (#6069) 2022-01-27 11:27:22 +01:00
Mathieu
526c2001d3 fix(lite/console): console cropped on window vertical resize (#6077)
Introduced by f3d4e40c6d
2022-01-20 14:11:38 +01:00
Florent BEAUCHAMP
f3d4e40c6d feat(xo-lite): implement Title bar component (#5960) 2021-12-15 17:07:56 +01:00
Mathieu
ac8f93fb0e feat(lite/ActionButton): creation of the ActionButton component (#6021) 2021-12-09 16:34:37 +01:00
Rajaa.BARHTAOUI
d2fbc1b573 fix(lite/Tree,TreeView): fix type errors (#6011) 2021-12-09 10:56:40 +01:00
Rajaa.BARHTAOUI
c5670a047f feat(lite): sort hosts by name_label (#6046) 2021-12-08 16:45:48 +01:00
Mathieu
e9472889f2 feat(xo-lite/Modal): creation of the Modal component (#5775) 2021-12-02 10:07:00 +01:00
Rajaa.BARHTAOUI
9bec4b571c fix(lite/tree): clicking next to VM name should work (#6005) 2021-11-30 16:18:53 +01:00
Rajaa.BARHTAOUI
b56cc96e37 feat(lite): sort VMs by name_label (#5989) 2021-11-30 15:41:55 +01:00
Rajaa.BARHTAOUI
011164f16c feat(lite/tree): highlight selected node (#5939)
Inspired by https://mui.com/components/tree-view/#contentcomponent-prop
2021-11-16 17:05:53 +01:00
Mathieu
b9a9471408 feat(xo-lite): creation of Select component (#5878) 2021-11-10 10:33:33 +01:00
Florent BEAUCHAMP
9abd1429a2 feat(lint-staged): apply validation rules to ts and tsx files (#5985)
See https://github.com/vatesfr/xen-orchestra/pull/5960#discussion_r738332567
2021-11-08 15:05:07 +01:00
Mathieu
7f656973de feat(xo-lite/Input): creation of Input component (#5975) 2021-11-05 16:30:56 +01:00
Mathieu
5e0766fcb1 feat(xo-lite/Button): replace styled component to mui-button (#5964) 2021-11-05 16:22:20 +01:00
Mathieu
2dc5c0e161 fix(lite): console scaling (#5933) 2021-10-15 17:34:55 +02:00
Pierre Donias
d0730d05fd chore(lite): update xen-api (#5945) 2021-10-12 15:52:03 +02:00
Pierre Donias
8fe3a439fc chore(lite): uninstall @material-ui/core (#5928)
Use @mui/material instead
2021-10-04 14:27:05 +02:00
Pierre Donias
12c7113662 fix(lite): use absolute assets URLs 2021-09-30 17:29:05 +02:00
Pierre Donias
36be46b073 feat(lite): add UI template and prepare for initial release (#5922) 2021-09-30 15:03:28 +02:00
Rajaa.BARHTAOUI
25ef579df5 feat(lite): Tree view (#5804) 2021-09-30 15:02:59 +02:00
Mathieu
cbbb07d389 feat(lite): list pool updates (#5794) 2021-09-30 15:02:58 +02:00
Pierre Donias
96df84c9d8 fix(lite): fix credentials error type (#5845) 2021-09-30 15:02:58 +02:00
Pierre Donias
17c4b5cbe7 feat(lite): show version in UI (#5844) 2021-09-30 15:02:58 +02:00
Mathieu
cf642cd720 feat(xo-lite/Pool): display IP, DNS, gateway from management PIF (#5771) 2021-09-30 15:02:58 +02:00
Mathieu
047f3a9b4c feat(xo-lite): use styled-components for console component (#5827) 2021-09-30 15:02:58 +02:00
Mathieu
b0f85e0380 feat(xo-lite/Console): handle disconnection and halted VMs (#5728) 2021-09-30 15:02:58 +02:00
Mathieu
7aa518b43c feat(xo-lite): wrapper for FormattedMessage (#5803) 2021-09-30 15:02:58 +02:00
Pierre Donias
d187d6aeeb chore(lite): allow explicit any 2021-09-30 15:02:58 +02:00
Pierre Donias
289dce3876 chore(lite/types): handle async computed 2021-09-30 15:02:58 +02:00
Pierre Donias
930afea1a1 feat(lite): signin page (#5787) 2021-09-30 15:02:58 +02:00
Mathieu
3801fa9134 feat(lite/Console): add CtrlAltDel button (#5722) 2021-09-30 15:02:58 +02:00
Julien Fontanet
ae211046b8 fix(lite): don't let Babel transpile import/export 2021-09-30 15:02:58 +02:00
Julien Fontanet
87ce9ff63a fix(lite): blacklist dns module
It's used by `xen-api` but should be fine as long as `reverseHostIpAddresses` is not enabled.
2021-09-30 15:02:58 +02:00
Pierre Donias
131c6321be chore(xo-lite): fix config and xen-api 2021-09-30 15:02:58 +02:00
Pierre Donias
6abcce498f feat(xo-lite): style guide (#5764) 2021-09-30 15:02:21 +02:00
Pierre Donias
9c38f5b327 feat(xo-lite): styled-components 2021-09-30 15:02:00 +02:00
Pierre Donias
14720d4cbf chore(xo-lite): move all dependencies to devDependencies 2021-09-30 15:01:38 +02:00
Mathieu
940ef2845d feat(xo-lite/Console): ability to scale VM console (#5703) 2021-09-30 15:01:38 +02:00
Pierre Donias
e3dbb7a6c2 fix(xo-lite/novnc): remove types 2021-09-30 15:01:38 +02:00
Pierre Donias
8cba6ebb20 fix(xo-lite/novnc): use @types/novnc-core
See https://github.com/DefinitelyTyped/DefinitelyTyped/pull/18602
2021-09-30 15:01:37 +02:00
Pierre Donias
a1b322f5be chore(xo-lite): update xen-api 2021-09-30 15:01:37 +02:00
Pierre Donias
07ff19c4b8 feat(xo-lite): Reaclette types 2021-09-30 15:01:06 +02:00
Pierre Donias
3a0af4e7e0 fix(xo-lite/console): get console URL from XAPI 2021-09-30 15:01:06 +02:00
Pierre Donias
dbb3f74ab0 feat(xo-lite): initial commit 2021-09-30 15:01:06 +02:00
747 changed files with 28725 additions and 40995 deletions


@@ -1,7 +1,5 @@
'use strict'
module.exports = {
extends: ['plugin:eslint-comments/recommended', 'plugin:n/recommended', 'standard', 'standard-jsx', 'prettier'],
extends: ['plugin:eslint-comments/recommended', 'standard', 'standard-jsx', 'prettier'],
globals: {
__DEV__: true,
$Dict: true,
@@ -17,40 +15,11 @@ module.exports = {
{
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
rules: {
'n/no-process-exit': 'off',
'no-console': 'off',
},
},
{
files: ['*.mjs'],
parserOptions: {
sourceType: 'module',
},
},
{
files: ['*.spec.{,c,m}js'],
rules: {
'n/no-unsupported-features/node-builtins': [
'error',
{
version: '>=16',
},
],
'n/no-unsupported-features/es-syntax': [
'error',
{
version: '>=16',
},
],
},
},
],
parserOptions: {
ecmaVersion: 13,
sourceType: 'script',
},
rules: {
// disabled because XAPI objects are using camel case
camelcase: ['off'],
@@ -65,7 +34,5 @@ module.exports = {
'lines-between-class-members': 'off',
'no-console': ['error', { allow: ['warn', 'error'] }],
strict: 'error',
},
}

.flowconfig Normal file

@@ -0,0 +1,16 @@
[ignore]
<PROJECT_ROOT>/node_modules/.*
[include]
[libs]
[lints]
[options]
esproposal.decorators=ignore
esproposal.optional_chaining=enable
include_warnings=true
module.use_strict=true
[strict]


@@ -1,46 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
**XOA or XO from the sources?**
If XOA:
- which release channel? (`stable` vs `latest`)
- please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
If XO from the sources:
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please provide the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**
Add any other context about the problem here.


@@ -1,19 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@@ -1,13 +0,0 @@
name: CI
on: [push]
jobs:
build:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- run: docker-compose -f docker/docker-compose.dev.yml build
- run: docker-compose -f docker/docker-compose.dev.yml up

.gitignore vendored

@@ -1,4 +1,5 @@
/_book/
/coverage/
/node_modules/
/lerna-debug.log
/lerna-debug.log.*
@@ -10,6 +11,10 @@
/packages/*/dist/
/packages/*/node_modules/
/@xen-orchestra/proxy/src/app/mixins/index.mjs
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat
@@ -30,7 +35,3 @@ pnpm-debug.log.*
yarn-error.log
yarn-error.log.*
.env
# code coverage
.nyc_output/
coverage/

.travis.yml Normal file

@@ -0,0 +1,23 @@
language: node_js
node_js:
- 14
# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
sudo: false
addons:
apt:
packages:
- qemu-utils
- blktap-utils
- vmdk-stream-converter
before_install:
- curl -o- -L https://yarnpkg.com/install.sh | bash
- export PATH="$HOME/.yarn/bin:$PATH"
cache:
yarn: true
script:
- yarn run travis-tests


@@ -1,35 +0,0 @@
### `asyncEach(iterable, iteratee, [opts])`
Executes `iteratee` in order for each value yielded by `iterable`.
Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.
`iterable` must be an iterable or async iterable.
`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:
- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or to wait for all calls to finish and throw an `AggregateError`; defaults to `true`
```js
import { asyncEach } from '@vates/async-each'
const contents = []
await asyncEach(
['foo.txt', 'bar.txt', 'baz.txt'],
async function (filename, i) {
contents[i] = await readFile(filename)
},
{
// reads two files at a time
concurrency: 2,
}
)
```
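A minimal sketch of the error-collecting mode, reusing the `readFile` calls from the example above: with `stopOnError: false`, all calls run to completion and the failures surface together as an `AggregateError`:
```js
try {
  await asyncEach(['foo.txt', 'bar.txt', 'baz.txt'], filename => readFile(filename), {
    // don't stop at the first failure
    stopOnError: false,
  })
} catch (error) {
  // individual failures are collected on the `errors` property
  console.warn(error.errors)
}
```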


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,68 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/async-each
[![Package Version](https://badgen.net/npm/v/@vates/async-each)](https://npmjs.org/package/@vates/async-each) ![License](https://badgen.net/npm/license/@vates/async-each) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/async-each)](https://bundlephobia.com/result?p=@vates/async-each) [![Node compatibility](https://badgen.net/npm/node/@vates/async-each)](https://npmjs.org/package/@vates/async-each)
> Run async fn for each item in (async) iterable
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/async-each):
```
> npm install --save @vates/async-each
```
## Usage
### `asyncEach(iterable, iteratee, [opts])`
Executes `iteratee` in order for each value yielded by `iterable`.
Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.
`iterable` must be an iterable or async iterable.
`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:
- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or to wait for all calls to finish and throw an `AggregateError`; defaults to `true`
```js
import { asyncEach } from '@vates/async-each'
const contents = []
await asyncEach(
['foo.txt', 'bar.txt', 'baz.txt'],
async function (filename, i) {
contents[i] = await readFile(filename)
},
{
// reads two files at a time
concurrency: 2,
}
)
```
## Contributions
Contributions are _very_ welcome, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,108 +0,0 @@
'use strict'
const noop = Function.prototype
class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []
let running = 0
let index = 0
let onAbort
if (signal !== undefined) {
onAbort = () => {
onRejectedWrapper(new Error('asyncEach aborted'))
}
signal.addEventListener('abort', onAbort)
}
const clean = () => {
onFulfilled = onRejected = noop
if (onAbort !== undefined) {
signal.removeEventListener('abort', onAbort)
}
}
resolve = (resolve =>
function resolveAndClean(value) {
resolve(value)
clean()
})(resolve)
reject = (reject =>
function rejectAndClean(reason) {
reject(reason)
clean()
})(reject)
let onFulfilled = value => {
--running
next()
}
const onFulfilledWrapper = value => onFulfilled(value)
let onRejected = stopOnError
? reject
: error => {
--running
errors.push(error)
next()
}
const onRejectedWrapper = reason => onRejected(reason)
let nextIsRunning = false
let next = async () => {
if (nextIsRunning) {
return
}
nextIsRunning = true
if (running < concurrency) {
const cursor = await it.next()
if (cursor.done) {
next = () => {
if (running === 0) {
if (errors.length === 0) {
resolve()
} else {
reject(new AggregateError(errors))
}
}
}
} else {
++running
try {
const result = iteratee.call(this, cursor.value, index++, iterable)
let then
if (result != null && typeof result === 'object' && typeof (then = result.then) === 'function') {
then.call(result, onFulfilledWrapper, onRejectedWrapper)
} else {
onFulfilled(result)
}
} catch (error) {
onRejected(error)
}
}
nextIsRunning = false
return next()
}
nextIsRunning = false
}
next()
})
}


@@ -1,101 +0,0 @@
'use strict'
/* eslint-env jest */
const { asyncEach } = require('./')
const randomDelay = (max = 10) =>
new Promise(resolve => {
setTimeout(resolve, Math.floor(Math.random() * max + 1))
})
const rejectionOf = p =>
new Promise((resolve, reject) => {
p.then(reject, resolve)
})
describe('asyncEach', () => {
const thisArg = 'qux'
const values = ['foo', 'bar', 'baz']
Object.entries({
'sync iterable': () => values,
'async iterable': async function* () {
for (const value of values) {
await randomDelay()
yield value
}
},
}).forEach(([what, getIterable]) =>
describe('with ' + what, () => {
let iterable
beforeEach(() => {
iterable = getIterable()
})
it('works', async () => {
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
;[1, 2, 4].forEach(concurrency => {
it('respects a concurrency of ' + concurrency, async () => {
let running = 0
await asyncEach(
values,
async () => {
++running
expect(running).toBeLessThanOrEqual(concurrency)
await randomDelay()
--running
},
{ concurrency }
)
})
})
it('stops on first error when stopOnError is true', async () => {
const error = new Error()
const iteratee = jest.fn((_, i) => {
if (i === 1) {
throw error
}
})
expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
it('rejects AggregateError when stopOnError is false', async () => {
const errors = []
const iteratee = jest.fn(() => {
const error = new Error()
errors.push(error)
throw error
})
const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
expect(error.errors).toEqual(errors)
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
it('can be interrupted with an AbortSignal', async () => {
const ac = new AbortController()
const iteratee = jest.fn((_, i) => {
if (i === 1) {
ac.abort()
}
})
await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
'asyncEach aborted'
)
expect(iteratee).toHaveBeenCalledTimes(2)
})
})
)
})


@@ -1,34 +0,0 @@
{
"private": false,
"name": "@vates/async-each",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/async-each",
"description": "Run async fn for each item in (async) iterable",
"keywords": [
"array",
"async",
"collection",
"each",
"for",
"foreach",
"iterable",
"iterator"
],
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/async-each",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
}
}


@@ -1,30 +0,0 @@
Node does not cache queries to `dns.lookup`, which can lead applications that open a lot of connections to suffer performance issues and to saturate Node's thread pool.
This library attempts to mitigate these problems by providing a version of this function with a short-lived cache applied to the results (errors are deliberately not cached, to avoid delaying recovery after DNS or network issues).
> Limitation: `verbatim: false` option is not supported.
It has exactly the same API as the native method and can be used directly:
```js
import { createCachedLookup } from '@vates/cached-dns.lookup'
const lookup = createCachedLookup()
lookup('example.net', { all: true, family: 0 }, (error, result) => {
if (error != null) {
return console.warn(error)
}
console.log(result)
})
```
Or it can be used to replace the native implementation and speed up the whole app:
```js
// assign our cached implementation to dns.lookup
const restore = createCachedLookup().patchGlobal()
// to restore the previous implementation
restore()
```
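The factory also accepts a custom `lookup` implementation (see `createCachedLookup({ lookup })` in the implementation further down), which makes the cache easy to observe in isolation; a minimal sketch using a hypothetical stub:
```js
let calls = 0
const lookup = createCachedLookup({
  // hypothetical stub standing in for dns.lookup
  lookup: (hostname, options, callback) => {
    ++calls
    callback(null, [{ address: '127.0.0.1', family: 4 }])
  },
})
lookup('example.net', () => {
  lookup('example.net', () => {
    console.log(calls) // → 1: the second call was served from the cache
  })
})
```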


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,63 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/cached-dns.lookup
[![Package Version](https://badgen.net/npm/v/@vates/cached-dns.lookup)](https://npmjs.org/package/@vates/cached-dns.lookup) ![License](https://badgen.net/npm/license/@vates/cached-dns.lookup) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/cached-dns.lookup)](https://bundlephobia.com/result?p=@vates/cached-dns.lookup) [![Node compatibility](https://badgen.net/npm/node/@vates/cached-dns.lookup)](https://npmjs.org/package/@vates/cached-dns.lookup)
> Cached implementation of dns.lookup
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/cached-dns.lookup):
```
> npm install --save @vates/cached-dns.lookup
```
## Usage
Node does not cache queries to `dns.lookup`, which can lead applications that open a lot of connections to suffer performance issues and to saturate Node's thread pool.
This library attempts to mitigate these problems by providing a version of this function with a short-lived cache applied to the results (errors are deliberately not cached, to avoid delaying recovery after DNS or network issues).
> Limitation: `verbatim: false` option is not supported.
It has exactly the same API as the native method and can be used directly:
```js
import { createCachedLookup } from '@vates/cached-dns.lookup'
const lookup = createCachedLookup()
lookup('example.net', { all: true, family: 0 }, (error, result) => {
if (error != null) {
return console.warn(error)
}
console.log(result)
})
```
Or it can be used to replace the native implementation and speed up the whole app:
```js
// assign our cached implementation to dns.lookup
const restore = createCachedLookup().patchGlobal()
// to restore the previous implementation
restore()
```
## Contributions
Contributions are _very_ welcome, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,72 +0,0 @@
'use strict'
const assert = require('assert')
const dns = require('dns')
const LRU = require('lru-cache')
function reportResults(all, results, callback) {
if (all) {
callback(null, results)
} else {
const first = results[0]
callback(null, first.address, first.family)
}
}
exports.createCachedLookup = function createCachedLookup({ lookup = dns.lookup } = {}) {
const cache = new LRU({
max: 500,
// 1 minute: long enough to be effective, short enough so there is no need to bother with DNS TTLs
ttl: 60e3,
})
function cachedLookup(hostname, options, callback) {
let all = false
let family = 0
if (typeof options === 'function') {
callback = options
} else if (typeof options === 'number') {
family = options
} else if (options != null) {
assert.notStrictEqual(options.verbatim, false, 'not supported by this implementation')
;({ all = all, family = family } = options)
}
// cache by family option because there will be an error if there are no
// entries for the requested family, so we cannot easily cache all families
// and filter when reporting back
const key = hostname + '/' + family
const result = cache.get(key)
if (result !== undefined) {
setImmediate(reportResults, all, result, callback)
} else {
lookup(hostname, { all: true, family, verbatim: true }, function onLookup(error, results) {
// errors are not cached because that would delay recovery after DNS/network issues
//
// there is no reliable way to detect whether the error is real or simply
// that there are no results for the requested hostname
//
// there should be far fewer errors than successes, therefore it should
// not be a big deal to not cache them
if (error != null) {
return callback(error)
}
cache.set(key, results)
reportResults(all, results, callback)
})
}
}
cachedLookup.patchGlobal = function patchGlobal() {
const previous = dns.lookup
dns.lookup = cachedLookup
return function restoreGlobal() {
assert.strictEqual(dns.lookup, cachedLookup)
dns.lookup = previous
}
}
return cachedLookup
}


@@ -1,32 +0,0 @@
{
"engines": {
"node": ">=8"
},
"dependencies": {
"lru-cache": "^7.0.4"
},
"private": false,
"name": "@vates/cached-dns.lookup",
"description": "Cached implementation of dns.lookup",
"keywords": [
"cache",
"dns",
"lookup"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/cached-dns.lookup",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/cached-dns.lookup",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"scripts": {
"postversion": "npm publish --access public"
}
}


@@ -1,5 +1,3 @@
'use strict'
exports.coalesceCalls = function (fn) {
let promise
const clean = () => {


@@ -1,5 +1,3 @@
'use strict'
/* eslint-env jest */
const { coalesceCalls } = require('./')


@@ -65,23 +65,6 @@ const f = compose(
)
```
Functions can receive extra parameters:
```js
const isIn = (value, min, max) => min <= value && value <= max
// Only compatible when `fns` is passed as an array!
const f = compose([
[add, 2],
[isIn, 3, 10],
])
console.log(f(1))
// → true
```
> Note: if the first function is defined with extra parameters, it will only receive the first value passed to the composed function, instead of all the parameters.
## Contributions
Contributions are _very_ welcome, either on the documentation or on


@@ -46,20 +46,3 @@ const f = compose(
[add2, mul3]
)
```
Functions can receive extra parameters:
```js
const isIn = (value, min, max) => min <= value && value <= max
// Only compatible when `fns` is passed as an array!
const f = compose([
[add, 2],
[isIn, 3, 10],
])
console.log(f(1))
// → true
```
> Note: if the first function is defined with extra parameters, it will only receive the first value passed to the composed function, instead of all the parameters.
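A minimal sketch of that caveat, assuming the `add` and `mul3` helpers from the examples above:
```js
const f = compose([[add, 2], mul3])
f(1, 5)
// `5` never reaches `add`: the result is mul3(add(1, 2))
```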


@@ -4,13 +4,11 @@ const defaultOpts = { async: false, right: false }
exports.compose = function compose(opts, fns) {
if (Array.isArray(opts)) {
fns = opts.slice() // don't mutate passed array
fns = opts
opts = defaultOpts
} else if (typeof opts === 'object') {
opts = Object.assign({}, defaultOpts, opts)
if (Array.isArray(fns)) {
fns = fns.slice() // don't mutate passed array
} else {
if (!Array.isArray(fns)) {
fns = Array.prototype.slice.call(arguments, 1)
}
} else {
@@ -22,24 +20,6 @@ exports.compose = function compose(opts, fns) {
if (n === 0) {
throw new TypeError('at least one function must be passed')
}
for (let i = 0; i < n; ++i) {
const entry = fns[i]
if (Array.isArray(entry)) {
const fn = entry[0]
const args = entry.slice()
args[0] = undefined
fns[i] = function composeWithArgs(value) {
args[0] = value
try {
return fn.apply(this, args)
} finally {
args[0] = undefined
}
}
}
}
if (n === 1) {
return fns[0]
}


@@ -1,5 +1,3 @@
'use strict'
/* eslint-env jest */
const { compose } = require('./')


@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "2.1.0",
"version": "2.0.0",
"engines": {
"node": ">=7.6"
},


@@ -1,86 +0,0 @@
### `decorateWith(fn, ...args)`
Creates a new ([legacy](https://babeljs.io/docs/en/babel-plugin-syntax-decorators#legacy)) method decorator from a function decorator; for instance, it allows using Lodash's functions as decorators:
```js
import { decorateWith } from '@vates/decorate-with'
class Foo {
@decorateWith(lodash.debounce, 150)
bar() {
// body
}
}
```
### `decorateClass(class, map)`
Decorates a number of accessors and methods directly, without using the decorator syntax:
```js
import { decorateClass } from '@vates/decorate-with'
class Foo {
get bar() {
// body
}
set bar(value) {
// body
}
baz() {
// body
}
}
decorateClass(Foo, {
// getter and/or setter
bar: {
// without arguments
get: lodash.memoize,
// with arguments
set: [lodash.debounce, 150],
},
// method (with or without arguments)
baz: lodash.curry,
})
```
The decorated class is returned, so you can export it directly.
To apply multiple transforms to an accessor/method, you can either call `decorateClass` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
```js
decorateClass(Foo, {
baz: compose([
[lodash.debounce, 150],
lodash.curry,
])
})
```
### `perInstance(fn, ...args)`
Helper to decorate a method per instance instead of once for the whole class.
This is often necessary when caching or deduplicating calls.
```js
import { perInstance } from '@vates/decorate-with'
class Foo {
@decorateWith(perInstance, lodash.memoize)
bar() {
// body
}
}
```
Because it's a normal function, it can also be used with `decorateClass`, with `compose` or even by itself.
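For instance, a minimal sketch of using it through the array form of `decorateClass` (reusing the `Foo` class above):
```js
decorateClass(Foo, {
  // equivalent to @decorateWith(perInstance, lodash.memoize)
  bar: [perInstance, lodash.memoize],
})
```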
### `decorateMethodsWith(class, map)`
> Deprecated alias for [`decorateClass(class, map)`](#decorateclassclass-map).


@@ -31,19 +31,15 @@ class Foo {
}
```
### `decorateClass(class, map)`
### `decorateMethodsWith(class, map)`
Decorates a number of accessors and methods directly, without using the decorator syntax:
Decorates a number of methods directly, without using the decorator syntax:
```js
import { decorateClass } from '@vates/decorate-with'
import { decorateMethodsWith } from '@vates/decorate-with'
class Foo {
get bar() {
// body
}
set bar(value) {
bar() {
// body
}
@@ -52,57 +48,17 @@ class Foo {
}
}
decorateClass(Foo, {
// getter and/or setter
bar: {
// without arguments
get: lodash.memoize,
decorateMethodsWith(Foo, {
// without arguments
bar: lodash.curry,
// with arguments
set: [lodash.debounce, 150],
},
// method (with or without arguments)
baz: lodash.curry,
// with arguments
baz: [lodash.debounce, 150],
})
```
The decorated class is returned, so you can export it directly.
To apply multiple transforms to an accessor/method, you can either call `decorateClass` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
```js
decorateClass(Foo, {
baz: compose([
[lodash.debounce, 150],
lodash.curry,
])
})
```
### `perInstance(fn, ...args)`
Helper to decorate a method per instance instead of once for the whole class.
This is often necessary when caching or deduplicating calls.
```js
import { perInstance } from '@vates/decorate-with'
class Foo {
@decorateWith(perInstance, lodash.memoize)
bar() {
// body
}
}
```
Because it's a normal function, it can also be used with `decorateClass`, with `compose` or even by itself.
### `decorateMethodsWith(class, map)`
> Deprecated alias for [`decorateClass(class, map)`](#decorateclassclass-map).
## Contributions
Contributions are _very_ welcome, either on the documentation or on


@@ -0,0 +1,42 @@
### `decorateWith(fn, ...args)`
Creates a new ([legacy](https://babeljs.io/docs/en/babel-plugin-syntax-decorators#legacy)) method decorator from a function decorator; for instance, it allows using Lodash's functions as decorators:
```js
import { decorateWith } from '@vates/decorate-with'
class Foo {
@decorateWith(lodash.debounce, 150)
bar() {
// body
}
}
```
### `decorateMethodsWith(class, map)`
Decorates a number of methods directly, without using the decorator syntax:
```js
import { decorateMethodsWith } from '@vates/decorate-with'
class Foo {
bar() {
// body
}
baz() {
// body
}
}
decorateMethodsWith(Foo, {
// without arguments
bar: lodash.curry,
// with arguments
baz: [lodash.debounce, 150],
})
```
The decorated class is returned, so you can export it directly.


@@ -1,5 +1,3 @@
'use strict'
exports.decorateWith = function decorateWith(fn, ...args) {
return (target, name, descriptor) => ({
...descriptor,
@@ -9,40 +7,15 @@ exports.decorateWith = function decorateWith(fn, ...args) {
const { getOwnPropertyDescriptor, defineProperty } = Object
function applyDecorator(decorator, value) {
return typeof decorator === 'function' ? decorator(value) : decorator[0](value, ...decorator.slice(1))
}
exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
exports.decorateMethodsWith = function decorateMethodsWith(klass, map) {
const { prototype } = klass
for (const name of Object.keys(map)) {
const decorator = map[name]
const descriptor = getOwnPropertyDescriptor(prototype, name)
if (typeof decorator === 'function' || Array.isArray(decorator)) {
descriptor.value = applyDecorator(decorator, descriptor.value)
} else {
const { get, set } = decorator
if (get !== undefined) {
descriptor.get = applyDecorator(get, descriptor.get)
}
if (set !== undefined) {
descriptor.set = applyDecorator(set, descriptor.set)
}
}
const { value } = descriptor
const decorator = map[name]
descriptor.value = typeof decorator === 'function' ? decorator(value) : decorator[0](value, ...decorator.slice(1))
defineProperty(prototype, name, descriptor)
}
return klass
}
exports.perInstance = function perInstance(fn, decorator, ...args) {
const map = new WeakMap()
return function () {
let decorated = map.get(this)
if (decorated === undefined) {
decorated = decorator(fn, ...args)
map.set(this, decorated)
}
return decorated.apply(this, arguments)
}
}


@@ -1,152 +0,0 @@
'use strict'
const assert = require('assert')
const { describe, it } = require('tap').mocha
const { decorateClass, decorateWith, decorateMethodsWith, perInstance } = require('./')
const identity = _ => _
describe('decorateWith', () => {
it('works', () => {
const expectedArgs = [Math.random(), Math.random()]
const expectedFn = Function.prototype
const newFn = () => {}
const decorator = decorateWith(function wrapper(fn, ...args) {
assert.deepStrictEqual(fn, expectedFn)
assert.deepStrictEqual(args, expectedArgs)
return newFn
}, ...expectedArgs)
const descriptor = {
configurable: true,
enumerable: false,
value: expectedFn,
writable: true,
}
assert.deepStrictEqual(decorator({}, 'foo', descriptor), {
...descriptor,
value: newFn,
})
})
})
describe('decorateClass', () => {
it('works', () => {
class C {
foo() {}
bar() {}
get baz() {}
// eslint-disable-next-line accessor-pairs
set qux(_) {}
}
const expectedArgs = [Math.random(), Math.random()]
const P = C.prototype
const descriptors = Object.getOwnPropertyDescriptors(P)
const newFoo = () => {}
const newBar = () => {}
const newGetBaz = () => {}
const newSetQux = _ => {}
decorateClass(C, {
foo(fn) {
assert.strictEqual(arguments.length, 1)
assert.strictEqual(fn, P.foo)
return newFoo
},
bar: [
function (fn, ...args) {
assert.strictEqual(fn, P.bar)
assert.deepStrictEqual(args, expectedArgs)
return newBar
},
...expectedArgs,
],
baz: {
get(fn) {
assert.strictEqual(arguments.length, 1)
assert.strictEqual(fn, descriptors.baz.get)
return newGetBaz
},
},
qux: {
set: [
function (fn, ...args) {
assert.strictEqual(fn, descriptors.qux.set)
assert.deepStrictEqual(args, expectedArgs)
return newSetQux
},
...expectedArgs,
],
},
})
const newDescriptors = Object.getOwnPropertyDescriptors(P)
assert.deepStrictEqual(newDescriptors.foo, { ...descriptors.foo, value: newFoo })
assert.deepStrictEqual(newDescriptors.bar, { ...descriptors.bar, value: newBar })
assert.deepStrictEqual(newDescriptors.baz, { ...descriptors.baz, get: newGetBaz })
assert.deepStrictEqual(newDescriptors.qux, { ...descriptors.qux, set: newSetQux })
})
it('throws if using an accessor decorator for a method', function () {
assert.throws(() =>
decorateClass(
class {
foo() {}
},
{ foo: { get: identity, set: identity } }
)
)
})
it('throws if using a method decorator for an accessor', function () {
assert.throws(() =>
decorateClass(
class {
get foo() {}
},
{ foo: identity }
)
)
})
})
it('decorateMethodsWith is an alias of decorateClass', function () {
assert.strictEqual(decorateMethodsWith, decorateClass)
})
describe('perInstance', () => {
it('works', () => {
let calls = 0
const expectedArgs = [Math.random(), Math.random()]
const expectedFn = Function.prototype
function wrapper(fn, ...args) {
assert.strictEqual(fn, expectedFn)
assert.deepStrictEqual(args, expectedArgs)
const i = ++calls
return () => i
}
const wrapped = perInstance(expectedFn, wrapper, ...expectedArgs)
// decorator is not called before decorated called
assert.strictEqual(calls, 0)
const o1 = {}
const o2 = {}
assert.strictEqual(wrapped.call(o1), 1)
// the same decorated function is returned for the same instance
assert.strictEqual(wrapped.call(o1), 1)
// a new decorated function is returned for another instance
assert.strictEqual(wrapped.call(o2), 2)
})
})


@@ -20,15 +20,11 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "2.0.0",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^16.0.1"
"postversion": "npm publish --access public"
}
}


@@ -1,5 +1,3 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')


@@ -1,5 +1,3 @@
'use strict'
/* eslint-env jest */
const { createDebounceResource } = require('./debounceResource')


@@ -1,5 +1,3 @@
'use strict'
const ensureArray = require('ensure-array')
const { MultiKeyMap } = require('@vates/multi-key-map')


@@ -1,5 +1,3 @@
'use strict'
/* eslint-env jest */
const { deduped } = require('./deduped')


@@ -1,50 +0,0 @@
> This library is compatible with Node's `EventEmitter` and web browsers' `EventTarget` APIs.
### API
```js
import { EventListenersManager } from '@vates/event-listeners-manager'
const events = new EventListenersManager(emitter)
// adding listeners
events.add('foo', onFoo).add('bar', onBar).add('baz', onBaz)
// removing a specific listener
events.remove('foo', onFoo)
// removing all listeners for a specific event
events.removeAll('foo')
// removing all listeners
events.removeAll()
```
### Typical use case
> Removing all listeners when no longer necessary.
Manually:
```js
const onFoo = () => {}
const onBar = () => {}
const onBaz = () => {}
emitter.on('foo', onFoo).on('bar', onBar).on('baz', onBaz)
// CODE LOGIC
emitter.off('foo', onFoo).off('bar', onBar).off('baz', onBaz)
```
With this library:
```js
const events = new EventListenersManager(emitter)
events.add('foo', () => {}).add('bar', () => {}).add('baz', () => {})
// CODE LOGIC
events.removeAll()
```
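Because the constructor falls back to `addEventListener`/`removeEventListener` when `addListener` is absent, the same pattern works against a browser `EventTarget`; a minimal sketch:
```js
const events = new EventListenersManager(window)
events.add('resize', () => console.log('resized'))
// later, during teardown
events.removeAll()
```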


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,81 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/event-listeners-manager
[![Package Version](https://badgen.net/npm/v/@vates/event-listeners-manager)](https://npmjs.org/package/@vates/event-listeners-manager) ![License](https://badgen.net/npm/license/@vates/event-listeners-manager) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/event-listeners-manager)](https://bundlephobia.com/result?p=@vates/event-listeners-manager) [![Node compatibility](https://badgen.net/npm/node/@vates/event-listeners-manager)](https://npmjs.org/package/@vates/event-listeners-manager)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/event-listeners-manager):
```
> npm install --save @vates/event-listeners-manager
```
## Usage
> This library is compatible with Node's `EventEmitter` and web browsers' `EventTarget` APIs.
### API
```js
import { EventListenersManager } from '@vates/event-listeners-manager'
const events = new EventListenersManager(emitter)
// adding listeners
events.add('foo', onFoo).add('bar', onBar).add('baz', onBaz)
// removing a specific listener
events.remove('foo', onFoo)
// removing all listeners for a specific event
events.removeAll('foo')
// removing all listeners
events.removeAll()
```
### Typical use case
> Removing all listeners when no longer necessary.
Manually:
```js
const onFoo = () => {}
const onBar = () => {}
const onBaz = () => {}
emitter.on('foo', onFoo).on('bar', onBar).on('baz', onBaz)
// CODE LOGIC
emitter.off('foo', onFoo).off('bar', onBar).off('baz', onBaz)
```
With this library:
```js
const events = new EventListenersManager(emitter)
events.add('foo', () => {}).add('bar', () => {}).add('baz', () => {})
// CODE LOGIC
events.removeAll()
```
## Contributions
Contributions are _very_ welcome, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,56 +0,0 @@
'use strict'
exports.EventListenersManager = class EventListenersManager {
constructor(emitter) {
this._listeners = new Map()
this._add = (emitter.addListener || emitter.addEventListener).bind(emitter)
this._remove = (emitter.removeListener || emitter.removeEventListener).bind(emitter)
}
add(type, listener) {
let listeners = this._listeners.get(type)
if (listeners === undefined) {
listeners = new Set()
this._listeners.set(type, listeners)
}
// don't add the same listener multiple times (allowed on Node.js)
if (!listeners.has(listener)) {
listeners.add(listener)
this._add(type, listener)
}
return this
}
remove(type, listener) {
const allListeners = this._listeners
const listeners = allListeners.get(type)
if (listeners !== undefined && listeners.delete(listener)) {
this._remove(type, listener)
if (listeners.size === 0) {
allListeners.delete(type)
}
}
return this
}
removeAll(type) {
const allListeners = this._listeners
const remove = this._remove
const types = type !== undefined ? [type] : allListeners.keys()
for (const type of types) {
const listeners = allListeners.get(type)
if (listeners !== undefined) {
allListeners.delete(type)
for (const listener of listeners) {
remove(type, listener)
}
}
}
return this
}
}


@@ -1,67 +0,0 @@
'use strict'
const t = require('tap')
const { EventEmitter } = require('events')
const { EventListenersManager } = require('./')
const noop = Function.prototype
// function spy (impl = Function.prototype) {
// function spy() {
// spy.calls.push([Array.from(arguments), this])
// }
// spy.calls = []
// return spy
// }
function assertListeners(t, event, listeners) {
t.strictSame(t.context.ee.listeners(event), listeners)
}
t.beforeEach(function (t) {
t.context.ee = new EventEmitter()
t.context.em = new EventListenersManager(t.context.ee)
})
t.test('.add adds a listener', function (t) {
t.context.em.add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.add does not add a duplicate listener', function (t) {
t.context.em.add('foo', noop).add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.remove removes a listener', function (t) {
t.context.em.add('foo', noop).remove('foo', noop)
assertListeners(t, 'foo', [])
t.end()
})
t.test('.removeAll removes all listeners of a given type', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [noop])
t.end()
})
t.test('.removeAll removes all listeners', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll()
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [])
t.end()
})


@@ -1,46 +0,0 @@
{
"engines": {
"node": ">=6"
},
"private": false,
"name": "@vates/event-listeners-manager",
"descriptions": "Easy way to clean up event listeners",
"keywords": [
"add",
"addEventListener",
"addListener",
"browser",
"clear",
"DOM",
"emitter",
"event",
"EventEmitter",
"EventTarget",
"management",
"manager",
"node",
"remove",
"removeEventListener",
"removeListener"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/event-listeners-manager",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/event-listeners-manager",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.1",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --branches=72"
},
"devDependencies": {
"tap": "^16.2.0"
}
}


@@ -1,5 +1,3 @@
'use strict'
class Node {
constructor(value) {
this.children = new Map()


@@ -1,5 +1,3 @@
'use strict'
/* eslint-env jest */
const { MultiKeyMap } = require('./')


@@ -1,5 +1,3 @@
'use strict'
const ms = require('ms')
exports.parseDuration = value => {


@@ -1,57 +0,0 @@
`undefined` predicates are ignored, and `undefined` is returned if all predicates are `undefined`; this permits the most efficient composition:
```js
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
const compositePredicate = predicate2
```
Predicates can also be passed wrapped in an array:
```js
const compositePredicate = every([predicate1, some([predicate2, predicate3])])
```
`this` and all arguments are passed to the nested predicates.
### `every(predicates)`
> Returns a predicate that returns `true` iff every predicate returns `true`.
```js
const isBetween3And7 = every(
n => n >= 3,
n => n <= 7
)
isBetween3And7(0)
// → false
isBetween3And7(5)
// → true
isBetween3And7(10)
// → false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.
```js
const isAliceOrBob = some(
name => name === 'Alice',
name => name === 'Bob'
)
isAliceOrBob('Alice')
// → true
isAliceOrBob('Bob')
// → true
isAliceOrBob('Oscar')
// → false
```
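As noted above, `this` and all arguments are forwarded to the nested predicates; a minimal sketch:
```js
const sameTypeAndEqual = every(
  (a, b) => typeof a === typeof b,
  (a, b) => a === b
)
sameTypeAndEqual(1, 1)
// → true
sameTypeAndEqual(1, '1')
// → false
```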


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,90 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/predicates
[![Package Version](https://badgen.net/npm/v/@vates/predicates)](https://npmjs.org/package/@vates/predicates) ![License](https://badgen.net/npm/license/@vates/predicates) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/predicates)](https://bundlephobia.com/result?p=@vates/predicates) [![Node compatibility](https://badgen.net/npm/node/@vates/predicates)](https://npmjs.org/package/@vates/predicates)
> Utilities to compose predicates
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/predicates):
```
> npm install --save @vates/predicates
```
## Usage
`undefined` predicates are ignored, and `undefined` is returned if all predicates are `undefined`; this permits the most efficient composition:
```js
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
const compositePredicate = predicate2
```
Predicates can also be passed wrapped in an array:
```js
const compositePredicate = every([predicate1, some([predicate2, predicate3])])
```
`this` and all arguments are passed to the nested predicates.
### `every(predicates)`
> Returns a predicate that returns `true` iff every predicate returns `true`.
```js
const isBetween3And7 = every(
n => n >= 3,
n => n <= 7
)
isBetween3And7(0)
// → false
isBetween3And7(5)
// → true
isBetween3And7(10)
// → false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.
```js
const isAliceOrBob = some(
name => name === 'Alice',
name => name === 'Bob'
)
isAliceOrBob('Alice')
// → true
isAliceOrBob('Bob')
// → true
isAliceOrBob('Oscar')
// → false
```
## Contributions
Contributions are _very_ welcome, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,71 +0,0 @@
'use strict'
const {
isArray,
prototype: { filter },
} = Array
class InvalidPredicate extends TypeError {
constructor(value) {
super('not a valid predicate')
this.value = value
}
}
function isDefinedPredicate(value) {
if (value === undefined) {
return false
}
if (typeof value !== 'function') {
throw new InvalidPredicate(value)
}
return true
}
function handleArgs() {
let predicates
if (!(arguments.length === 1 && isArray((predicates = arguments[0])))) {
predicates = arguments
}
return filter.call(predicates, isDefinedPredicate)
}
exports.every = function every() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length
if (n === 0) {
return
}
if (n === 1) {
return predicates[0]
}
return function everyPredicate() {
for (let i = 0; i < n; ++i) {
if (!predicates[i].apply(this, arguments)) {
return false
}
}
return true
}
}
exports.some = function some() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length
if (n === 0) {
return
}
if (n === 1) {
return predicates[0]
}
return function somePredicate() {
for (let i = 0; i < n; ++i) {
if (predicates[i].apply(this, arguments)) {
return true
}
}
return false
}
}


@@ -1,65 +0,0 @@
'use strict'
const assert = require('assert/strict')
const { describe, it } = require('tap').mocha
const { every, some } = require('./')
const T = () => true
const F = () => false
const testArgsHandling = fn => {
it('returns undefined if all predicates are undefined', () => {
assert.equal(fn(undefined), undefined)
assert.equal(fn([undefined]), undefined)
})
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('throws if it receives a non-predicate', () => {
const error = new TypeError('not a valid predicate')
error.value = 3
assert.throws(() => fn(3), error)
})
it('forwards this and arguments to predicates', () => {
const thisArg = 'qux'
const args = ['foo', 'bar', 'baz']
const predicate = function () {
assert.equal(this, thisArg)
assert.deepEqual(Array.from(arguments), args)
}
fn(predicate, predicate).apply(thisArg, args)
})
}
const runTests = (fn, truthTable) =>
it('works', () => {
truthTable.forEach(([result, ...predicates]) => {
assert.equal(fn(...predicates)(), result)
assert.equal(fn(predicates)(), result)
})
})
describe('every', () => {
testArgsHandling(every)
runTests(every, [
[true, T, T],
[false, T, F],
[false, F, T],
[false, F, F],
])
})
describe('some', () => {
testArgsHandling(some)
runTests(some, [
[true, T, T],
[true, T, F],
[true, F, T],
[false, F, F],
])
})


@@ -1,40 +0,0 @@
{
"private": false,
"name": "@vates/predicates",
"description": "Utilities to compose predicates",
"keywords": [
"and",
"combine",
"compose",
"every",
"function",
"functions",
"or",
"predicate",
"predicates",
"some"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/predicates",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/predicates",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=6"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^16.0.1"
}
}


@@ -1,26 +0,0 @@
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- may return less data than requested if the stream has ended
- returns `null` if the stream has ended and no data has been read
```js
import { readChunk } from '@vates/read-chunk'
;(async () => {
let chunk
while ((chunk = await readChunk(stream, 1024)) !== null) {
// do something with chunk
}
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
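When the strict read fails on a short stream, the partial data is attached to the error as a `chunk` property (see the implementation further down); a minimal sketch:
```js
try {
  await readChunkStrict(stream, 1024)
} catch (error) {
  // on a short read, `error.chunk` holds the partial data that was read
  console.warn(error.message, error.chunk)
}
```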


@@ -16,12 +16,9 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
## Usage
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- may return less data than requested if the stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended
```js
import { readChunk } from '@vates/read-chunk'
@@ -33,16 +30,6 @@ import { readChunk } from '@vates/read-chunk'
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
## Contributions
Contributions are _very_ welcome, either on the documentation or on


@@ -0,0 +1,13 @@
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended
```js
import { readChunk } from '@vates/read-chunk'
;(async () => {
let chunk
while ((chunk = await readChunk(stream, 1024)) !== null) {
// do something with chunk
}
})()
```


@@ -1,5 +1,3 @@
'use strict'
const readChunk = (stream, size) =>
size === 0
? Promise.resolve(Buffer.alloc(0))
@@ -30,22 +28,3 @@ const readChunk = (stream, size) =>
onReadable()
})
exports.readChunk = readChunk
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {
throw new Error('stream has ended without data')
}
if (size !== undefined && chunk.length !== size) {
const error = new Error('stream has ended with not enough data')
Object.defineProperties(error, {
chunk: {
value: chunk,
},
})
throw error
}
return chunk
}


@@ -1,10 +1,8 @@
'use strict'
/* eslint-env jest */
const { Readable } = require('stream')
const { readChunk, readChunkStrict } = require('./')
const { readChunk } = require('./')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
@@ -43,27 +41,3 @@ describe('readChunk', () => {
})
})
})
const rejectionOf = promise =>
promise.then(
value => {
throw value
},
error => error
)
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended without data')
expect(error.chunk).toEqual(undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended with not enough data')
expect(error.chunk).toEqual(Buffer.from('foobar'))
})
})


@@ -19,7 +19,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.0.0",
"version": "0.1.2",
"engines": {
"node": ">=8.10"
},


@@ -1,7 +1,5 @@
#!/usr/bin/env node
'use strict'
const fs = require('fs')
const mapKeys = (object, iteratee) => {


@@ -1,5 +1,3 @@
'use strict'
const wrapCall = (fn, arg, thisArg) => {
try {
return Promise.resolve(fn.call(thisArg, arg))


@@ -1,5 +1,3 @@
'use strict'
/* eslint-env jest */
const { asyncMapSettled } = require('./')


@@ -1,5 +1,3 @@
'use strict'
// type MaybePromise<T> = Promise<T> | T
//
// declare export function asyncMap<T1, T2>(


@@ -0,0 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))


@@ -9,14 +9,28 @@
},
"version": "0.2.0",
"engines": {
"node": ">=14"
"node": ">=10"
},
"main": "dist/",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"postversion": "npm publish --access public",
"test": "tap --lines 67 --functions 92 --branches 52 --statements 67"
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"devDependencies": {
"@babel/cli": "^7.7.4",
"@babel/core": "^7.7.4",
"@babel/plugin-proposal-decorators": "^7.8.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.0",
"@babel/preset-env": "^7.7.4",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/decorate-with": "^2.0.0",
"@vates/decorate-with": "^0.1.0",
"@xen-orchestra/log": "^0.3.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
@@ -26,8 +40,5 @@
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"devDependencies": {
"tap": "^16.0.1"
}
}


@@ -1,14 +1,12 @@
'use strict'
const assert = require('assert')
const hash = require('object-hash')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
import assert from 'assert'
import hash from 'object-hash'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
const log = createLogger('xo:audit-core')
exports.Storage = class Storage {
export class Storage {
constructor() {
this._lock = Promise.resolve()
}
@@ -31,7 +29,7 @@ const ID_TO_ALGORITHM = {
5: 'sha256',
}
class AlteredRecordError extends Error {
export class AlteredRecordError extends Error {
constructor(id, nValid, record) {
super('altered record')
@@ -40,9 +38,8 @@ class AlteredRecordError extends Error {
this.record = record
}
}
exports.AlteredRecordError = AlteredRecordError
class MissingRecordError extends Error {
export class MissingRecordError extends Error {
constructor(id, nValid) {
super('missing record')
@@ -50,10 +47,8 @@ class MissingRecordError extends Error {
this.nValid = nValid
}
}
exports.MissingRecordError = MissingRecordError
const NULL_ID = 'nullId'
exports.NULL_ID = NULL_ID
export const NULL_ID = 'nullId'
const HASH_ALGORITHM_ID = '5'
const createHash = (data, algorithmId = HASH_ALGORITHM_ID) =>
@@ -62,12 +57,13 @@ const createHash = (data, algorithmId = HASH_ALGORITHM_ID) =>
excludeKeys: key => key === 'id',
})}`
class AuditCore {
export class AuditCore {
constructor(storage) {
assert.notStrictEqual(storage, undefined)
this._storage = storage
}
@decorateWith(defer)
async add($defer, subject, event, data) {
const time = Date.now()
$defer(await this._storage.acquireLock())
@@ -152,6 +148,7 @@ class AuditCore {
}
}
@decorateWith(defer)
async deleteRangeAndRewrite($defer, newest, oldest) {
assert.notStrictEqual(newest, undefined)
assert.notStrictEqual(oldest, undefined)
@@ -192,9 +189,3 @@ class AuditCore {
}
}
}
exports.AuditCore = AuditCore
decorateClass(AuditCore, {
add: defer,
deleteRangeAndRewrite: defer,
})
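
A hedged sketch of the `decorateClass`/`defer` pattern used above: `defer` injects a `$defer` callback as the method's first parameter, and `decorateClass` applies it to the listed prototype methods (the class below is illustrative):

'use strict'
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')

class Demo {
  // $defer is injected by the decorator; callers only pass `value`
  async run($defer, value) {
    $defer(() => console.log('cleanup runs after the method settles'))
    console.log('working on', value)
  }
}
decorateClass(Demo, { run: defer })

new Demo().run(42).catch(console.error)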

View File

@@ -1,9 +1,6 @@
'use strict'
/* eslint-env jest */
const assert = require('assert/strict')
const { afterEach, describe, it } = require('tap').mocha
const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')
import { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } from '.'
const asyncIteratorToArray = async asyncIterator => {
const array = []
@@ -75,7 +72,7 @@ const auditCore = new AuditCore(db)
const storeAuditRecords = async () => {
await Promise.all(DATA.map(data => auditCore.add(...data)))
const records = await asyncIteratorToArray(auditCore.getFrom())
assert.equal(records.length, DATA.length)
expect(records.length).toBe(DATA.length)
return records
}
@@ -86,11 +83,10 @@ describe('auditCore', () => {
const [newestRecord, deletedRecord] = await storeAuditRecords()
const nValidRecords = await auditCore.checkIntegrity(NULL_ID, newestRecord.id)
assert.equal(nValidRecords, DATA.length)
expect(nValidRecords).toBe(DATA.length)
await db.del(deletedRecord.id)
await assert.rejects(
auditCore.checkIntegrity(NULL_ID, newestRecord.id),
await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
new MissingRecordError(deletedRecord.id, 1)
)
})
@@ -101,8 +97,7 @@ describe('auditCore', () => {
alteredRecord.event = ''
await db.put(alteredRecord)
await assert.rejects(
auditCore.checkIntegrity(NULL_ID, newestRecord.id),
await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
new AlteredRecordError(alteredRecord.id, 1, alteredRecord)
)
})
@@ -112,8 +107,8 @@ describe('auditCore', () => {
await auditCore.deleteFrom(secondRecord.id)
assert.equal(await db.get(firstRecord.id), undefined)
assert.equal(await db.get(secondRecord.id), undefined)
expect(await db.get(firstRecord.id)).toBe(undefined)
expect(await db.get(secondRecord.id)).toBe(undefined)
await auditCore.checkIntegrity(secondRecord.id, thirdRecord.id)
})

View File

@@ -46,7 +46,7 @@ module.exports = function (pkg, configs = {}) {
return {
comments: !__PROD__,
ignore: __PROD__ ? [/\btests?\//, /\.spec\.js$/] : undefined,
ignore: __PROD__ ? [/\.spec\.js$/] : undefined,
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {
@@ -56,14 +56,22 @@ module.exports = function (pkg, configs = {}) {
}),
presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
targets: (() => {
const targets = {}
if (pkg.browserslist !== undefined) {
targets.browsers = pkg.browserslist
}
let node = (pkg.engines || {}).node
if (node !== undefined) {
const trimChars = '^=>~'
while (trimChars.includes(node[0])) {
node = node.slice(1)
}
targets.node = node
}
return { browsers: pkg.browserslist, node }
return targets
})(),
}
}
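
A small sketch of the `engines.node` prefix trimming introduced above, with illustrative ranges:

'use strict'
// strips leading range operators so Babel gets a bare version number
const trimEnginePrefix = range => {
  const trimChars = '^=>~'
  let node = range
  while (trimChars.includes(node[0])) {
    node = node.slice(1)
  }
  return node
}
console.log(trimEnginePrefix('>=10')) // '10'
console.log(trimEnginePrefix('^14.0.0')) // '14.0.0'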

View File

@@ -10,7 +10,7 @@
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=8.3"
"node": ">=6"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -1,5 +1,3 @@
'use strict'
const getopts = require('getopts')
const { version } = require('./package.json')

View File

@@ -1,5 +1,3 @@
'use strict'
const { dirname } = require('path')
const fs = require('promise-toolbox/promisifyAll')(require('fs'))

View File

@@ -1,4 +1,4 @@
'use strict'
#!/usr/bin/env node
// -----------------------------------------------------------------------------
@@ -26,13 +26,7 @@ module.exports = async function main(args) {
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}

View File

@@ -1,5 +1,3 @@
'use strict'
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')

View File

@@ -1,5 +1,3 @@
'use strict'
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')

View File

@@ -1,7 +1,5 @@
#!/usr/bin/env node
'use strict'
require('./_composeCommands')({
'clean-vms': {
get main() {

View File

@@ -7,12 +7,12 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/fs": "^0.18.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0"
"promise-toolbox": "^0.19.2"
},
"engines": {
"node": ">=7.10.1"
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.7",
"version": "0.6.0",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -1,12 +1,10 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const Disposable = require('promise-toolbox/Disposable.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
@@ -24,34 +22,6 @@ const getAdaptersByRemote = adapters => {
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
const DEFAULT_SETTINGS = {
reportWhen: 'failure',
}
const DEFAULT_VM_SETTINGS = {
bypassVdiChainsCheck: false,
checkpointSnapshot: false,
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
exportRetention: 0,
fullInterval: 0,
healthCheckSr: undefined,
healthCheckVmsWithTags: [],
maxMergedDeltasPerRun: 2,
offlineBackup: false,
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
unconditionalSnapshot: false,
vmTimeout: 0,
}
const DEFAULT_METADATA_SETTINGS = {
retentionPoolMetadata: 0,
retentionXoMetadata: 0,
}
exports.Backup = class Backup {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
@@ -70,22 +40,17 @@ exports.Backup = class Backup {
'{job.name}': job.name,
'{vm.name_label}': vm => vm.name_label,
})
}
const { type } = job
const baseSettings = { ...DEFAULT_SETTINGS }
run() {
const type = this._job.type
if (type === 'backup') {
Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
this.run = this._runVmBackup
return this._runVmBackup()
} else if (type === 'metadataBackup') {
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
this.run = this._runMetadataBackup
return this._runMetadataBackup()
} else {
throw new Error(`No runner for the backup type ${type}`)
}
Object.assign(baseSettings, job.settings[''])
this._baseSettings = baseSettings
this._settings = { ...baseSettings, ...job.settings[schedule.id] }
}
async _runMetadataBackup() {
@@ -97,6 +62,13 @@ exports.Backup = class Backup {
}
const config = this._config
const settings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...job.settings[''],
...job.settings[schedule.id],
}
const poolIds = extractIdsFromSimplePattern(job.pools)
const isEmptyPools = poolIds.length === 0
const isXoMetadata = job.xoMetadata !== undefined
@@ -104,8 +76,6 @@ exports.Backup = class Backup {
throw new Error('no metadata mode found')
}
const settings = this._settings
const { retentionPoolMetadata, retentionXoMetadata } = settings
if (
@@ -217,7 +187,14 @@ exports.Backup = class Backup {
const schedule = this._schedule
const config = this._config
const settings = this._settings
const { settings } = job
const scheduleSettings = {
...config.defaultSettings,
...config.vm.defaultSettings,
...settings[''],
...settings[schedule.id],
}
await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -245,15 +222,14 @@ exports.Backup = class Backup {
})
)
),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
async (srs, remoteAdapters, healthCheckSr) => {
async (srs, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
// remove srs that failed (already handled)
srs = srs.filter(_ => _ !== undefined)
if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
return
}
@@ -263,27 +239,23 @@ exports.Backup = class Backup {
remoteAdapters = getAdaptersByRemote(remoteAdapters)
const allSettings = this._job.settings
const baseSettings = this._baseSettings
const handleVm = vmUuid =>
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
Disposable.use(this._getRecord('VM', vmUuid), vm =>
new VmBackup({
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
// remotes,
remoteAdapters,
schedule,
settings: { ...settings, ...allSettings[vm.uuid] },
settings: { ...scheduleSettings, ...settings[vmUuid] },
srs,
vm,
}).run()
)
)
const { concurrency } = settings
const { concurrency } = scheduleSettings
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
}
)
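
Both versions above layer settings by increasing precedence: config defaults, then job-wide settings, then per-schedule, then per-VM. A hedged sketch with illustrative values:

'use strict'
const config = { defaultSettings: { reportWhen: 'failure', concurrency: 1 } }
const allSettings = {
  '': { concurrency: 2 }, // job-wide settings
  'schedule-1': { fullInterval: 7 }, // per-schedule settings
  'vm-1': { concurrency: 4 }, // per-VM settings (hypothetical UUID)
}
// later spreads win: the per-VM value overrides all the others
const settings = {
  ...config.defaultSettings,
  ...allSettings[''],
  ...allSettings['schedule-1'],
  ...allSettings['vm-1'],
}
console.log(settings) // { reportWhen: 'failure', concurrency: 4, fullInterval: 7 }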

View File

@@ -1,5 +1,3 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
exports.DurablePartition = class DurablePartition {

View File

@@ -1,64 +0,0 @@
'use strict'
const { Task } = require('./Task')
exports.HealthCheckVmBackup = class HealthCheckVmBackup {
#xapi
#restoredVm
constructor({ restoredVm, xapi }) {
this.#restoredVm = restoredVm
this.#xapi = xapi
}
async run() {
return Task.run(
{
name: 'vmstart',
},
async () => {
let restoredVm = this.#restoredVm
const xapi = this.#xapi
const restoredId = restoredVm.uuid
// remove vifs
await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
const start = new Date()
// start Vm
await xapi.callAsync(
'VM.start',
restoredVm.$ref,
false, // Start paused?
false // Skip pre-boot checks?
)
const started = new Date()
const timeout = 10 * 60 * 1000
const startDuration = started - start
let remainingTimeout = timeout - startDuration
if (remainingTimeout < 0) {
throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
}
// wait until the 'Running' state is actually stored in the local xapi object cache
restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
timeout: remainingTimeout,
})
const running = new Date()
remainingTimeout -= running - started
if (remainingTimeout < 0) {
throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
}
// wait for the guest tool version to be defined
await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
timeout: remainingTimeout,
})
}
)
}
}
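
The health check above spends a single 10-minute budget across several sequential waits; a sketch of the remaining-timeout accounting, with assumed durations:

'use strict'
const timeout = 10 * 60 * 1000 // overall budget: 10 minutes
const start = 0
const started = start + 30e3 // assume VM.start took 30 seconds
let remainingTimeout = timeout - (started - start)
console.log(remainingTimeout) // 570000 — budget left for the 'Running' wait

const running = started + 60e3 // assume reaching 'Running' took 60 more seconds
remainingTimeout -= running - started
console.log(remainingTimeout) // 510000 — budget left for the guest-tools wait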

View File

@@ -1,5 +1,3 @@
'use strict'
const assert = require('assert')
const { formatFilenameDate } = require('./_filenameDate.js')
@@ -8,9 +6,9 @@ const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')
exports.ImportVmBackup = class ImportVmBackup {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses } = {} }) {
this._adapter = adapter
this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
this._importDeltaVmSettings = { newMacAddresses }
this._metadata = metadata
this._srUuid = srUuid
this._xapi = xapi
@@ -30,12 +28,7 @@ exports.ImportVmBackup = class ImportVmBackup {
} else {
assert.strictEqual(metadata.mode, 'delta')
const ignoredVdis = new Set(
Object.entries(this._importDeltaVmSettings.mapVdisSrs)
.filter(([_, srUuid]) => srUuid === null)
.map(([vdiUuid]) => vdiUuid)
)
backup = await adapter.readDeltaVmBackup(metadata, ignoredVdis)
backup = await adapter.readDeltaVmBackup(metadata)
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
}
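
One side of the diff above derives the ignored VDIs from `mapVdisSrs` entries set to `null`; a sketch of that filtering with hypothetical UUIDs:

'use strict'
const mapVdisSrs = {
  'vdi-1': 'sr-a', // restore this VDI to a specific SR
  'vdi-2': null, // skip this VDI entirely
}
const ignoredVdis = new Set(
  Object.entries(mapVdisSrs)
    .filter(([, srUuid]) => srUuid === null)
    .map(([vdiUuid]) => vdiUuid)
)
console.log([...ignoredVdis]) // [ 'vdi-2' ]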

View File

@@ -1,29 +1,21 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const Disposable = require('promise-toolbox/Disposable.js')
const fromCallback = require('promise-toolbox/fromCallback.js')
const fromEvent = require('promise-toolbox/fromEvent.js')
const pDefer = require('promise-toolbox/defer.js')
const pump = require('pump')
const { basename, dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { readdir, stat } = require('fs-extra')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isMetadataFile, isVhdFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
@@ -47,12 +39,13 @@ const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
files.push({
realPath,
metadataPath,
@@ -74,18 +67,58 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
constructor(handler, { debounceResource = res => res, dirMode } = {}) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
}
get handler() {
return this._handler
}
async _deleteVhd(path) {
const handler = this._handler
const vhds = await asyncMapSettled(
await handler.list(dirname(path), {
filter: isVhdFile,
prependDir: true,
}),
async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
path,
}
} catch (error) {
// Do not fail on corrupted VHDs (usually uncleaned temporary files),
// they are probably inconsequential to the backup process and should not
// fail it.
warn(`BackupNg#_deleteVhd ${path}`, { error })
}
}
)
const base = basename(path)
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
if (child === undefined) {
await handler.unlink(path)
return 0
}
try {
const childPath = child.path
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
await handler.rename(path, childPath)
return mergedDataSize
} catch (error) {
handler.unlink(path).catch(warn)
throw error
}
}
async _findPartition(devicePath, partitionId) {
const partitions = await listPartitions(devicePath)
const partition = partitions.find(_ => _.id === partitionId)
@@ -95,6 +128,9 @@ class RemoteAdapter {
return partition
}
_getLvmLogicalVolumes = Disposable.factory(this._getLvmLogicalVolumes)
_getLvmLogicalVolumes = deduped(this._getLvmLogicalVolumes, (devicePath, pvId, vgName) => [devicePath, pvId, vgName])
_getLvmLogicalVolumes = debounceResourceFactory(this._getLvmLogicalVolumes)
async *_getLvmLogicalVolumes(devicePath, pvId, vgName) {
yield this._getLvmPhysicalVolume(devicePath, pvId && (await this._findPartition(devicePath, pvId)))
@@ -106,6 +142,9 @@ class RemoteAdapter {
}
}
_getLvmPhysicalVolume = Disposable.factory(this._getLvmPhysicalVolume)
_getLvmPhysicalVolume = deduped(this._getLvmPhysicalVolume, (devicePath, partition) => [devicePath, partition?.id])
_getLvmPhysicalVolume = debounceResourceFactory(this._getLvmPhysicalVolume)
async *_getLvmPhysicalVolume(devicePath, partition) {
const args = []
if (partition !== undefined) {
@@ -126,6 +165,9 @@ class RemoteAdapter {
}
}
_getPartition = Disposable.factory(this._getPartition)
_getPartition = deduped(this._getPartition, (devicePath, partition) => [devicePath, partition?.id])
_getPartition = debounceResourceFactory(this._getPartition)
async *_getPartition(devicePath, partition) {
const options = ['loop', 'ro']
@@ -178,6 +220,7 @@ class RemoteAdapter {
})
}
_usePartitionFiles = Disposable.factory(this._usePartitionFiles)
async *_usePartitionFiles(diskId, partitionId, paths) {
const path = yield this.getPartition(diskId, partitionId)
@@ -189,22 +232,6 @@ class RemoteAdapter {
return files
}
// check if we will be allowed to merge a vhd created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(openVhd(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.#useVhdDirectory()
})
}
fetchPartitionFiles(diskId, partitionId, paths) {
const { promise, reject, resolve } = pDefer()
Disposable.use(
@@ -226,9 +253,16 @@ class RemoteAdapter {
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the JSON; unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
let mergedDataSize = 0
await asyncMapSettled(backups, ({ _filename, vhds }) =>
Promise.all([
handler.unlink(_filename),
asyncMap(Object.values(vhds), async _ => {
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
}),
])
)
return mergedDataSize
}
async deleteMetadataBackup(backupId) {
@@ -258,46 +292,22 @@ class RemoteAdapter {
)
}
deleteVmBackup(file) {
return this.deleteVmBackups([file])
}
async deleteVmBackup(filename) {
const metadata = JSON.parse(String(await this._handler.readFile(filename)))
metadata._filename = filename
async deleteVmBackups(files) {
const metadatas = await asyncMap(files, file => this.readVmBackupMetadata(file))
const { delta, full, ...others } = groupBy(metadatas, 'mode')
const unsupportedModes = Object.keys(others)
if (unsupportedModes.length !== 0) {
throw new Error('no deleter for backup modes: ' + unsupportedModes.join(', '))
if (metadata.mode === 'delta') {
await this.deleteDeltaVmBackups([metadata])
} else if (metadata.mode === 'full') {
await this.deleteFullVmBackups([metadata])
} else {
throw new Error(`no deleter for backup mode ${metadata.mode}`)
}
await Promise.all([
delta !== undefined && this.deleteDeltaVmBackups(delta),
full !== undefined && this.deleteFullVmBackups(full),
])
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, logWarn: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
}
#getCompressionType() {
return this._vhdDirectoryCompression
}
#useVhdDirectory() {
return this.handler.useVhdDirectory()
}
#useAlias() {
return this.#useVhdDirectory()
}
getDisk = Disposable.factory(this.getDisk)
getDisk = deduped(this.getDisk, diskId => [diskId])
getDisk = debounceResourceFactory(this.getDisk)
async *getDisk(diskId) {
const handler = this._handler
@@ -334,6 +344,7 @@ class RemoteAdapter {
// - `<partitionId>`: partitioned disk
// - `<pvId>/<vgName>/<lvName>`: LVM on a partitioned disk
// - `/<vgName>/lvName>`: LVM on a raw disk
getPartition = Disposable.factory(this.getPartition)
async *getPartition(diskId, partitionId) {
const devicePath = yield this.getDisk(diskId)
if (partitionId === undefined) {
@@ -350,26 +361,13 @@ class RemoteAdapter {
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
}
// if we use aliases on this remote, the file has to be named `<baseName>.alias.vhd`
getVhdFileName(baseName) {
if (this.#useAlias()) {
return `${baseName}.alias.vhd`
}
return `${baseName}.vhd`
}
async listAllVmBackups() {
const handler = this._handler
const backups = { __proto__: null }
await asyncMap(await handler.list(BACKUP_DIR), async entry => {
// ignore hidden and lock files
if (entry[0] !== '.' && !entry.endsWith('.lock')) {
const vmBackups = await this.listVmBackups(entry)
if (vmBackups.length !== 0) {
backups[entry] = vmBackups
}
}
await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
const vmBackups = await this.listVmBackups(vmUuid)
backups[vmUuid] = vmBackups
})
return backups
@@ -382,12 +380,8 @@ class RemoteAdapter {
const entriesMap = {}
await asyncMap(await readdir(path), async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
@@ -458,94 +452,34 @@ class RemoteAdapter {
return backupsByPool
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
}
async #getCachabledDataListVmBackups(dir) {
async listVmBackups(vmUuid, predicate) {
const handler = this._handler
const backups = {}
const backups = []
try {
const files = await handler.list(dir, {
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
filter: isMetadataFile,
prependDir: true,
})
await asyncMap(files, async file => {
try {
const metadata = await this.readVmBackupMetadata(file)
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups[file] = metadata
if (predicate === undefined || predicate(metadata)) {
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups.push(metadata)
}
} catch (error) {
warn(`can't read vm backup metadata`, { error, file, dir })
warn(`listVmBackups ${file}`, { error })
}
})
return backups
} catch (error) {
let code
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
throw error
}
}
}
// use _ to mark this method as private by convention,
// since we decorate it with synchronized.withKey in the constructor
// and #-private methods are not writable
//
// read the list of backups of a VM from the cache;
// if the cache is missing or broken => regenerate it and return it
async _readCacheListVmBackups(vmUuid) {
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(dir)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeVmBackupsCache(path, backups)
return backups
}
async #writeVmBackupsCache(cacheFile, backups) {
try {
const text = JSON.stringify(backups)
const zipped = await fromCallback(zlib.gzip, text)
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
} catch (error) {
warn('writeVmBackupsCache', { cacheFile, error })
}
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
if (cached === undefined) {
return []
}
Object.values(cached).forEach(metadata => {
if (predicate === undefined || predicate(metadata)) {
backups.push(metadata)
}
})
return backups.sort(compareTimestamp)
}
@@ -571,25 +505,6 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
await this.outputStream(path, input, { checksum, validator })
}
}
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
await this._handler.outputStream(path, input, {
checksum,
@@ -601,42 +516,14 @@ class RemoteAdapter {
})
}
// open the hierarchy of ancestors until we find a full one
async _createSyntheticStream(handler, path) {
const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
// I don't want the vhds to be disposed on return
// but only when the stream is done (or failed)
let disposed = false
const disposeOnce = async () => {
if (!disposed) {
disposed = true
try {
await disposableSynthetic.dispose()
} catch (error) {
warn('openVhd: failed to dispose VHDs', { error })
}
}
}
const synthetic = disposableSynthetic.value
await synthetic.readBlockAllocationTable()
const stream = await synthetic.stream()
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
stream.on('error', disposeOnce)
return stream
}
async readDeltaVmBackup(metadata, ignoredVdis) {
async readDeltaVmBackup(metadata) {
const handler = this._handler
const { vbds, vhds, vifs, vm } = metadata
const { vbds, vdis, vhds, vifs, vm } = metadata
const dir = dirname(metadata._filename)
const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))
const streams = {}
await asyncMapSettled(Object.keys(vdis), async ref => {
streams[`${ref}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[ref]))
await asyncMapSettled(Object.keys(vdis), async id => {
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
})
return {
@@ -654,10 +541,7 @@ class RemoteAdapter {
}
async readVmBackupMetadata(path) {
// _filename is a private field used to compute the backup id
//
// it's enumerable to make it cacheable
return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
}
}
@@ -672,30 +556,4 @@ Object.assign(RemoteAdapter.prototype, {
isValidXva,
})
decorateMethodsWith(RemoteAdapter, {
_getLvmLogicalVolumes: compose([
Disposable.factory,
[deduped, (devicePath, pvId, vgName) => [devicePath, pvId, vgName]],
debounceResourceFactory,
]),
_getLvmPhysicalVolume: compose([
Disposable.factory,
[deduped, (devicePath, partition) => [devicePath, partition?.id]],
debounceResourceFactory,
]),
_getPartition: compose([
Disposable.factory,
[deduped, (devicePath, partition) => [devicePath, partition?.id]],
debounceResourceFactory,
]),
_usePartitionFiles: Disposable.factory,
getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),
getPartition: Disposable.factory,
})
exports.RemoteAdapter = RemoteAdapter
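
The listing cache above stores gzipped JSON; a minimal round-trip sketch using the same `fromCallback` helper from promise-toolbox:

'use strict'
const fromCallback = require('promise-toolbox/fromCallback')
const zlib = require('zlib')

async function main() {
  const backups = { 'backup.json': { mode: 'delta' } } // illustrative cache content
  const zipped = await fromCallback(zlib.gzip, JSON.stringify(backups))
  const text = await fromCallback(zlib.gunzip, zipped)
  console.log(JSON.parse(text)) // { 'backup.json': { mode: 'delta' } }
}
main().catch(console.error)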

View File

@@ -1,5 +1,3 @@
'use strict'
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')

View File

@@ -1,18 +1,12 @@
'use strict'
const CancelToken = require('promise-toolbox/CancelToken')
const CancelToken = require('promise-toolbox/CancelToken.js')
const Zone = require('node-zone')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
const logAfterEnd = () => {
throw new Error('task has already ended')
}
const noop = Function.prototype
const serializeErrors = errors => (Array.isArray(errors) ? errors.map(serializeError) : errors)
// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
@@ -21,7 +15,6 @@ const serializeError = error =>
? {
...error, // Copy enumerable properties.
code: error.code,
errors: serializeErrors(error.errors), // supports AggregateError
message: error.message,
name: error.name,
stack: error.stack,
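
The explicit field copying above matters because an Error's own properties are non-enumerable; a quick demonstration:

'use strict'
const error = new Error('boom')
// spreading alone loses message, name and stack — they are non-enumerable
console.log(JSON.stringify({ ...error })) // {}
const serialized = {
  ...error,
  code: error.code,
  message: error.message,
  name: error.name,
  stack: error.stack,
}
console.log(JSON.parse(JSON.stringify(serialized)).message) // 'boom'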

View File

@@ -1,5 +1,3 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')

View File

@@ -1,14 +1,10 @@
'use strict'
const assert = require('assert')
const findLast = require('lodash/findLast.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
@@ -24,13 +20,6 @@ const { watchStreamSize } = require('./_watchStreamSize.js')
const { debug, warn } = createLogger('xo:backups:VmBackup')
class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}
const asyncEach = async (iterable, fn, thisArg = iterable) => {
for (const item of iterable) {
await fn.call(thisArg, item)
@@ -44,28 +33,12 @@ const forkDeltaExport = deltaExport =>
},
})
class VmBackup {
constructor({
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
remotes,
schedule,
settings,
srs,
vm,
}) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
// don't match replicated VMs created by this very job otherwise they
// will be replicated again and again
throw new Error('cannot backup a VM created by this very job')
}
exports.VmBackup = class VmBackup {
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
this.remotes = remotes
this.scheduleId = schedule.id
this.timestamp = undefined
@@ -79,7 +52,6 @@ class VmBackup {
this._fullVdisRequired = undefined
this._getSnapshotNameLabel = getSnapshotNameLabel
this._isDelta = job.mode === 'delta'
this._healthCheckSr = healthCheckSr
this._jobId = job.id
this._jobSnapshots = undefined
this._xapi = vm.$xapi
@@ -106,6 +78,7 @@ class VmBackup {
: [FullBackupWriter, FullReplicationWriter]
const allSettings = job.settings
Object.keys(remoteAdapters).forEach(remoteId => {
const targetSettings = {
...settings,
@@ -128,49 +101,33 @@ class VmBackup {
}
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, step, parallel = true) {
async _callWriters(fn, warnMessage, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only ones that are not already in their own sub-tasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
try {
await fn(writer)
} catch (error) {
writers.delete(writer)
throw error
}
return
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await callWriter(writer)
await fn(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
throw new Error('all targets have failed, step: ' + warnMessage)
}
}
@@ -197,10 +154,7 @@ class VmBackup {
const settings = this._settings
const doSnapshot =
settings.unconditionalSnapshot ||
this._isDelta ||
(!settings.offlineBackup && vm.power_state === 'Running') ||
settings.snapshotRetention !== 0
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
if (doSnapshot) {
await Task.run({ name: 'snapshot' }, async () => {
if (!settings.bypassVdiChainsCheck) {
@@ -208,9 +162,7 @@ class VmBackup {
}
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
ignoreNobakVdis: true,
name_label: this._getSnapshotNameLabel(vm),
unplugVusbs: true,
})
this.timestamp = Date.now()
@@ -332,23 +284,17 @@ class VmBackup {
}
async _removeUnusedSnapshots() {
const allSettings = this.job.settings
const baseSettings = this._baseSettings
const baseVmRef = this._baseVm?.$ref
// TODO: handle all schedules (no longer existing schedules default to 0 retention)
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const { scheduleId } = this
const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)
const baseVmRef = this._baseVm?.$ref
const xapi = this._xapi
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
const settings = {
...baseSettings,
...allSettings[scheduleId],
...allSettings[this.vm.uuid],
await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
}
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
}
})
})
}
@@ -357,14 +303,12 @@ class VmBackup {
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
if (baseVm === undefined) {
debug('no base VM found')
return
}
const fullInterval = this._settings.fullInterval
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
debug('not using base VM because fullInterval reached')
return
}
@@ -375,17 +319,10 @@ class VmBackup {
const baseUuidToSrcVdi = new Map()
await asyncMap(await baseVm.$getDisks(), async baseRef => {
const [baseUuid, snapshotOf] = await Promise.all([
xapi.getField('VDI', baseRef, 'uuid'),
xapi.getField('VDI', baseRef, 'snapshot_of'),
])
const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(baseUuid, srcVdi)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
})
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
}
})
@@ -396,23 +333,9 @@ class VmBackup {
false
)
if (presentBaseVdis.size === 0) {
debug('no base VM found')
return
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
if (!presentBaseVdis.has(baseUuid)) {
fullVdisRequired.add(srcVdi.uuid)
}
})
@@ -421,24 +344,7 @@ class VmBackup {
this._fullVdisRequired = fullVdisRequired
}
async _healthCheck() {
const settings = this._settings
if (this._healthCheckSr === undefined) {
return
}
// check if current VM has tags
const { tags } = this.vm
const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))
if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
return
}
await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
}
run = defer(this.run)
async run($defer) {
const settings = this._settings
assert(
@@ -448,9 +354,7 @@ class VmBackup {
await this._callWriters(async writer => {
await writer.beforeBackup()
$defer(async () => {
await writer.afterBackup()
})
$defer(() => writer.afterBackup())
}, 'writer.beforeBackup()')
await this._fetchJobSnapshots()
@@ -486,11 +390,5 @@ class VmBackup {
await this._fetchJobSnapshots()
await this._removeUnusedSnapshots()
}
await this._healthCheck()
}
}
exports.VmBackup = VmBackup
decorateMethodsWith(VmBackup, {
run: defer,
})
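
The `fullInterval` check above decides whether the delta chain may keep growing or a new full export is forced; a sketch of the predicate with illustrative values:

'use strict'
// the base VM is reusable while the chain stays under fullInterval
// (0 means the delta chain is unlimited)
const canUseBaseVm = (fullInterval, deltaChainLength) =>
  fullInterval === 0 || fullInterval > deltaChainLength
console.log(canUseBaseVm(0, 5)) // true — unlimited chain
console.log(canUseBaseVm(7, 3)) // true — chain still short enough
console.log(canUseBaseVm(7, 7)) // false — forces a new full export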

View File

@@ -1,5 +1,3 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')

View File

@@ -1,6 +1,4 @@
'use strict'
exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
exports.isXvaSumFile = filename => filename.endsWith('.xva.cheksum')

View File

@@ -1,16 +1,11 @@
'use strict'
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
require('@xen-orchestra/log').createLogger('xo:backups:worker')
)
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const Disposable = require('promise-toolbox/Disposable.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
@@ -63,6 +58,11 @@ class BackupWorker {
}).run()
}
getAdapter = Disposable.factory(this.getAdapter)
getAdapter = deduped(this.getAdapter, remote => [remote.url])
getAdapter = compose(this.getAdapter, function (resource) {
return this.debounceResource(resource)
})
async *getAdapter(remote) {
const handler = getHandler(remote, this.#remoteOptions)
await handler.sync()
@@ -70,13 +70,17 @@ class BackupWorker {
yield new RemoteAdapter(handler, {
debounceResource: this.debounceResource,
dirMode: this.#config.dirMode,
vhdDirectoryCompression: this.#config.vhdDirectoryCompression,
})
} finally {
await handler.forget()
}
}
getXapi = Disposable.factory(this.getXapi)
getXapi = deduped(this.getXapi, ({ url }) => [url])
getXapi = compose(this.getXapi, function (resource) {
return this.debounceResource(resource)
})
async *getXapi({ credentials: { username: user, password }, ...opts }) {
const xapi = new Xapi({
...this.#xapiOptions,
@@ -98,30 +102,6 @@ class BackupWorker {
}
}
decorateMethodsWith(BackupWorker, {
getAdapter: compose([
Disposable.factory,
[deduped, remote => [remote.url]],
[
compose,
function (resource) {
return this.debounceResource(resource)
},
],
]),
getXapi: compose([
Disposable.factory,
[deduped, xapi => [xapi.url]],
[
compose,
function (resource) {
return this.debounceResource(resource)
},
],
]),
})
// Received message:
//
// Message {

View File

@@ -1,7 +1,5 @@
'use strict'
const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')
const cancelable = require('promise-toolbox/cancelable.js')
const CancelToken = require('promise-toolbox/CancelToken.js')
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
//

View File

@@ -1,444 +0,0 @@
'use strict'
/* eslint-env jest */
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
let tempDir, adapter, handler, jobId, vdiId, basePath
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
handler = getHandler({ url: `file://${tempDir}` })
await handler.sync()
adapter = new RemoteAdapter(handler)
jobId = uniqueId()
vdiId = uniqueId()
basePath = `vdis/${jobId}/${vdiId}`
await fs.mkdirp(`${tempDir}/${basePath}`)
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
await handler.forget()
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
async function generateVhd(path, opts = {}) {
let vhd
let dataPath = path
if (opts.useAlias) {
await handler.mkdir(dirname(path) + '/data/')
dataPath = dirname(path) + '/data/' + basename(path)
}
if (opts.mode === 'directory') {
await handler.mkdir(dataPath)
vhd = new VhdDirectory(handler, dataPath)
} else {
const fd = await handler.openFile(dataPath, 'wx')
vhd = new VhdFile(handler, fd)
}
vhd.header = { ...VHDHEADER, ...opts.header }
vhd.footer = { ...VHDFOOTER, ...opts.footer, uuid: uniqueIdBuffer() }
if (vhd.header.parentUuid) {
vhd.footer.diskType = Constants.DISK_TYPES.DIFFERENCING
} else {
vhd.footer.diskType = Constants.DISK_TYPES.DYNAMIC
}
if (opts.useAlias === true) {
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
}
await vhd.writeBlockAllocationTable()
await vhd.writeHeader()
await vhd.writeFooter()
return vhd
}
test('it removes broken vhd', async () => {
// TODO: also test a directory and an alias
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const logInfo = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
expect(loggued).toEqual(`VHD check error`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
expect((await handler.list(basePath)).length).toEqual(0)
})
test('it removes vhds with missing or multiple ancestors', async () => {
// one with a broken parent, should be deleted
await generateVhd(`${basePath}/abandonned.vhd`, {
header: {
parentUnicodeName: 'gone.vhd',
parentUuid: uniqueIdBuffer(),
},
})
// one orphan, which is a full vhd, no parent: should stay
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan in the metadata: should stay
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
},
})
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [`${basePath}/child.vhd`, `${basePath}/abandonned.vhd`],
}),
{ flags: 'w' }
)
// clean
let loggued = ''
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
test('it removes backup metadata referencing a missing vhd in delta backup', async () => {
// create a metadata file marking child and orphan as ok
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here
],
})
)
await generateVhd(`${basePath}/abandonned.vhd`)
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
},
})
let loggued = ''
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
// a missing vhd causes clean to remove all vhds
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
test('it merges delta of non destroyed chain', async () => {
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 12000, // a size too small
vhds: [
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
// orphan is not here, it should be merged into child
],
})
)
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
},
})
// a grand child
await generateVhd(`${basePath}/grandchild.vhd`, {
header: {
parentUnicodeName: 'child.vhd',
parentUuid: child.footer.uuid,
},
})
let loggued = []
const logInfo = message => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
expect(loggued[0]).toEqual(`incorrect backup size in metadata`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
const [merging] = loggued
expect(merging).toEqual(`merging VHD chain`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
expect(metadata.size).toEqual(209920)
// merging is already tested in vhd-lib, don't retest it here (and these vhds are as empty as my stomach at 12:12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(2)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
})
test('it finishes an unterminated merge', async () => {
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
})
)
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
},
})
// a merge in progress file
await handler.writeFile(
`${basePath}/.orphan.vhd.merge.json`,
JSON.stringify({
parent: {
header: orphan.header.checksum,
},
child: {
header: child.header.checksum,
},
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
// merging is already tested in vhd-lib, don't retest it here (and these vhds are as empty as my stomach at 12:12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(1)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
})
// each of the vhds can be a file, a directory, an alias to a file, or an alias to a directory
// the messages and resulting files should be identical to the output with vhd files, which is tested independently
describe('tests multiple combinations', () => {
for (const useAlias of [true, false]) {
for (const vhdMode of ['file', 'directory']) {
test(`alias : ${useAlias}, mode: ${vhdMode}`, async () => {
// a broken VHD
if (useAlias) {
await handler.mkdir(basePath + '/data')
}
const brokenVhdDataPath = basePath + (useAlias ? '/data/broken.vhd' : '/broken.vhd')
if (vhdMode === 'directory') {
await handler.mkdir(brokenVhdDataPath)
} else {
await handler.writeFile(brokenVhdDataPath, 'notreallyavhd')
}
if (useAlias) {
await VhdAbstract.createAlias(handler, 'broken.alias.vhd', brokenVhdDataPath)
}
// a vhd not referenced in metadata
await generateVhd(`${basePath}/nonreference.vhd`, { useAlias, mode: vhdMode })
// an abandoned delta vhd without its parent
await generateVhd(`${basePath}/abandonned.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'gone.vhd',
parentUuid: uniqueIdBuffer(),
},
})
// an ancestor of a vhd present in metadata
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
mode: vhdMode,
})
const child = await generateVhd(`${basePath}/child.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: ancestor.footer.uuid,
},
})
// a grand child vhd in metadata
await generateVhd(`${basePath}/grandchild.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: child.footer.uuid,
},
})
// an older parent whose merge into clean.vhd was interrupted
const cleanAncestor = await generateVhd(`${basePath}/cleanAncestor.vhd`, {
useAlias,
mode: vhdMode,
})
// a clean vhd in metadata
const clean = await generateVhd(`${basePath}/clean.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: cleanAncestor.footer.uuid,
},
})
await handler.writeFile(
`${basePath}/.cleanAncestor.vhd${useAlias ? '.alias.vhd' : ''}.merge.json`,
JSON.stringify({
parent: {
header: cleanAncestor.header.checksum,
},
child: {
header: clean.header.checksum,
},
})
)
// the metadata file
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
],
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
// broken, unreferenced and abandoned vhds should be deleted (alias and data)
// ancestor and child should be merged
// grand child and clean vhd should not have changed
const survivors = await handler.list(basePath)
// console.log(survivors)
if (useAlias) {
const dataSurvivors = await handler.list(basePath + '/data')
// the goal of the alias: do not move a full folder
expect(dataSurvivors).toContain('ancestor.vhd')
expect(dataSurvivors).toContain('grandchild.vhd')
expect(dataSurvivors).toContain('cleanAncestor.vhd')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(4) // the 3 ok + data
expect(dataSurvivors.length).toEqual(3) // the 3 ok
} else {
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
expect(survivors).toContain('grandchild.vhd')
expect(survivors.length).toEqual(3)
}
})
}
}
})
test('it cleans orphan merge states', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })
expect(await handler.list(basePath)).toEqual([])
})
test('check Aliases should work alone', async () => {
await handler.mkdir('vhds')
await handler.mkdir('vhds/data')
await generateVhd(`vhds/data/ok.vhd`)
await VhdAbstract.createAlias(handler, 'vhds/ok.alias.vhd', 'vhds/data/ok.vhd')
await VhdAbstract.createAlias(handler, 'vhds/missingData.alias.vhd', 'vhds/data/nonexistent.vhd')
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
// only ok has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
expect(alias.length).toEqual(1)
const data = await handler.list('vhds/data')
expect(data.length).toEqual(1)
})

View File

@@ -1,79 +1,86 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants.js')
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
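// e.g. with hypothetical paths, chain = ['vdis/<vdi>/child.vhd', 'vdis/<vdi>/parent.vhd']:
// child.vhd is merged into parent.vhd, and parent.vhd is then renamed to child.vhd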
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
// checking the size of a VHD directory is costly
// (1 HTTP query per 1000 blocks)
// so we only compute the size if all the VHDs are VhdFiles
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
return vhds.every(vhd => vhd instanceof VhdFile)
}
let child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(handler, vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
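// hedged usage sketch (paths are hypothetical):
//   const size = await computeVhdsSize(handler, ['<vdiDir>/a.vhd', '<vdiDir>/b.vhd'])
// returns the summed sizes when every VHD is a VhdFile on a non-encrypted remote,
// and undefined otherwise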
chain
.slice(1)
.reverse()
.forEach(parent => {
onLog(`the parent ${parent} of the child ${child} is unused`)
})
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) {
if (merge) {
logInfo(`merging VHD chain`, { chain })
// `mergeVhd` does not work with a stream; either
// - make it accept a stream
// - or create a synthetic VHD which is not a stream
if (children.length !== 1) {
// TODO: implement merging multiple children
children.length = 1
child = children[0]
}
onLog(`merging ${child} into ${parent}`)
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
onLog(`merging ${child}: ${done}/${total}`)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
await mergeVhd(
handler,
parent,
handler,
child,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children),
{
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
)
clearInterval(handle)
await Promise.all([
handler.rename(parent, child),
asyncMap(children.slice(0, -1), child => {
onLog(`the VHD ${child} is unused`)
if (remove) {
onLog(`deleting unused VHD ${child}`)
return handler.unlink(child)
}
}),
])
}
}
})
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
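// e.g. '.ancestor.vhd.merge.json' matches, capturing 'ancestor.vhd' (the VHD whose
// merge was interrupted)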
const listVhds = async (handler, vmDir, logWarn) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
const INTERRUPTED_VHDS_REG = /^(?:(.+)\/)?\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir) => {
const vhds = []
const interruptedVhds = new Set()
await asyncMap(
await handler.list(`${vmDir}/vdis`, {
@@ -88,205 +95,84 @@ const listVhds = async (handler, vmDir, logWarn) => {
async vdiDir => {
const list = await handler.list(vdiDir, {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
prependDir: true,
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
await asyncMap(list, async file => {
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
vhds.push(file)
} else {
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non-resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
const [, dir, file] = res
interruptedVhds.add(`${dir}/${file}`)
}
})
}
)
)
return { vhds, interruptedVhds, aliases }
return { vhds, interruptedVhds }
}
async function checkAliases(
aliasPaths,
targetDataRepository,
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
}
continue
}
try {
const { dispose } = await openVhd(handler, target)
try {
await dispose()
} catch (e) {
// error during dispose should not trigger a deletion
}
} catch (error) {
logWarn('missing or broken alias target', { alias, target, error })
if (remove) {
try {
await VhdAbstract.unlink(handler, alias)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { alias, target, error })
}
}
}
continue
}
aliasFound.push(resolve('/', target))
}
const vhds = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
if (remove) {
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
}
}
})
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, onLog = noop }) {
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhds = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
const vhdsList = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhds, async path => {
await asyncMap(vhdsList.vhds, async path => {
try {
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), async vhd => {
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter(!vhdsList.interruptedVhds.has(path))
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
await vhd.check()
})
vhdChildren[parent] = path
}
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
onLog(`error while checking the VHD with path ${path}`, { error })
if (error?.code === 'ERR_ASSERTION' && remove) {
logInfo('deleting broken VHD', { path })
return VhdAbstract.unlink(handler, path)
onLog(`deleting broken ${path}`)
await handler.unlink(path)
}
}
})
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const { statePath } = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
logWarn('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
logInfo('deleting orphan merge state', { statePath })
await handler.unlink(statePath)
}
}
}
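// e.g. a leftover 'vdis/<vdi>/.parent.vhd.merge.json' whose 'parent.vhd' no longer
// exists in `vhds` is reported as an orphan merge state, and deleted when `remove`
// is set (this is what the 'it cleans orphan merge states' test above exercises)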
// check that aliases are correct
// check that every VHD in the data subfolder has a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
})
// remove VHDs with missing ancestors
{
const deletions = []
// return true if the VHD has been deleted or is missing
const deleteIfOrphan = vhdPath => {
const parent = vhdParents[vhdPath]
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
if (parent === undefined) {
return
}
// no longer needs to be checked
delete vhdParents[vhdPath]
delete vhdParents[vhd]
deleteIfOrphan(parent)
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
vhds.delete(vhd)
logWarn('parent VHD is missing', { parent, child: vhdPath })
onLog(`the parent ${parent} of the VHD ${vhd} is missing`)
if (remove) {
logInfo('deleting orphan VHD', { path: vhdPath })
deletions.push(VhdAbstract.unlink(handler, vhdPath))
onLog(`deleting orphan VHD ${vhd}`)
deletions.push(handler.unlink(vhd))
}
}
}
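// note: the recursion processes the parent first, so a missing ancestor cascades
// down the chain and every descendant is deleted as well (when `remove` is set)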
@@ -302,7 +188,7 @@ exports.cleanVm = async function cleanVm(
await Promise.all(deletions)
}
const jsons = new Set()
const jsons = []
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -310,7 +196,7 @@ exports.cleanVm = async function cleanVm(
})
entries.forEach(path => {
if (isMetadataFile(path)) {
jsons.add(path)
jsons.push(path)
} else if (isXvaFile(path)) {
xvas.add(path)
} else if (isXvaSumFile(path)) {
@@ -322,7 +208,7 @@ exports.cleanVm = async function cleanVm(
// the check is not reliable enough to justify deleting the file; the best we can
// do is report it
if (!(await this.isValidXva(path))) {
logWarn('XVA might be broken', { path })
onLog(`the XVA with path ${path} is potentially broken`)
}
})
@@ -332,25 +218,22 @@ exports.cleanVm = async function cleanVm(
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// references a missing XVA/VHD
await asyncMap(jsons, async json => {
let metadata
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
logWarn('failed to read backup metadata', { path: json, error })
jsons.delete(json)
return
}
const metadata = JSON.parse(await handler.readFile(json))
const { mode } = metadata
let size
if (mode === 'full') {
const linkedXva = resolve('/', vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
size = await handler.getSize(linkedXva).catch(error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
onLog(`the XVA linked to the metadata ${json} is missing`)
if (remove) {
logInfo('deleting incomplete backup', { path: json })
jsons.delete(json)
onLog(`deleting incomplete backup ${json}`)
await handler.unlink(json)
}
}
@@ -360,31 +243,44 @@ exports.cleanVm = async function cleanVm(
return Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
})()
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
// FIXME: find a better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (missingVhds.length === 0) {
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
linkedVhds.forEach(path => {
vhdsToJSons[path] = json
size = await asyncMap(linkedVhds, vhd => handler.getSize(vhd)).then(sum, error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
onLog(`Some VHDs linked to the metadata ${json} are missing`)
if (remove) {
logInfo('deleting incomplete backup', { path: json })
jsons.delete(json)
onLog(`deleting incomplete backup ${json}`)
await handler.unlink(json)
}
}
}
const metadataSize = metadata.size
if (size !== undefined && metadataSize !== size) {
onLog(`incorrect size in metadata: ${metadataSize ?? 'none'} instead of ${size}`)
// don't update if the stored size is greater than the size of the files found,
// as it can indicate a problem
if (fixMetadata && (metadataSize === undefined || metadataSize < size)) {
try {
metadata.size = size
await handler.writeFile(json, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${json}`, { error })
}
}
}
})
// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -408,15 +304,15 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.unshift(vhd)
chain.push(vhd)
return chain
}
}
logWarn('unused VHD', { path: vhd })
onLog(`the VHD ${vhd} is unused`)
if (remove) {
logInfo('deleting unused VHD', { path: vhd })
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
onLog(`deleting unused VHD ${vhd}`)
unusedVhdsDeletion.push(handler.unlink(vhd))
}
}
@@ -425,112 +321,38 @@ exports.cleanVm = async function cleanVm(
})
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
if (merge) {
vhdsList.interruptedVhds.forEach(parent => {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
})
}
Object.values(vhdChainsToMerge).forEach(chain => {
Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
if (chain !== undefined) {
toMerge.push(chain)
}
})
}
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, { logInfo, logWarn, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the VHDs in the chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
unusedVhdsDeletion.push(mergeVhdChain(chain, { handler, onLog, remove, merge }))
}
})
}
await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
onLog(`the XVA ${path} is unused`)
if (remove) {
logInfo('deleting unused XVA', { path })
onLog(`deleting unused XVA ${path}`)
return handler.unlink(path)
}
}),
asyncMap(xvaSums, path => {
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
logInfo('unused XVA checksum', { path })
onLog(`the XVA checksum ${path} is unused`)
if (remove) {
logInfo('deleting unused XVA checksum', { path })
onLog(`deleting unused XVA checksum ${path}`)
return handler.unlink(path)
}
}
}),
])
// update the size of delta metadata whose VHDs were merged
// for the others, check that the stored size matches the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = JSON.parse(await handler.readFile(metadataPath))
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
const { mode, size, vhds, xva } = metadata
try {
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
} catch (error) {
logWarn('failed to get backup size', { backup: metadataPath, error })
return
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
}
}
})
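// e.g. after a merge, a delta backup's metadata `size` is rewritten to the new
// on-disk total; the tests above expect 314880 bytes in 'file' VHD mode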
return {
// boolean: whether some VHDs were merged (or still need to be merged)
merge: toMerge.length !== 0,
}
}
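// A minimal usage sketch, mirroring the tests above (adapter construction elided):
//   const { merge } = await adapter.cleanVm('/', { remove: true, merge: true, logWarn: console.warn, lock: false })
//   // `merge` is true when at least one VHD chain was, or still needs to be, merged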

Some files were not shown because too many files have changed in this diff.