Compare commits
1 commit
nr-fix-S3-...computed-d

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 595c4bd5a8 |  |

@@ -3,12 +3,63 @@
# Julien Fontanet's configuration
# https://gist.github.com/julien-f/8096213

# Top-most EditorConfig file.
root = true

# Common config.
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

# CoffeeScript
#
# https://github.com/polarmobile/coffeescript-style-guide/blob/master/README.md
[*.{,lit}coffee]
indent_size = 2
indent_style = space

# Markdown
[*.{md,mdwn,mdown,markdown}]
indent_size = 4
indent_style = space

# Package.json
#
# This indentation style is the one used by npm.
[/package.json]
indent_size = 2
indent_style = space

# Jade
[*.jade]
indent_size = 2
indent_style = space

# JavaScript
#
# Two spaces seem to be the most common style, at least in
# Node.js (http://nodeguide.com/style.html#tabs-vs-spaces).
[*.js]
indent_size = 2
indent_style = space

# Less
[*.less]
indent_size = 2
indent_style = space

# Sass
#
# Style used for http://libsass.com
[*.s[ac]ss]
indent_size = 2
indent_style = space

# YAML
#
# Only spaces are allowed.
[*.yaml]
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

@@ -1,2 +0,0 @@
# xo_fs_nfs=nfs://ip:/folder
# xo_fs_smb=smb://login:pass@domain\\ip\folder

36 .eslintrc.js
@@ -1,38 +1,12 @@
module.exports = {
  extends: ['plugin:eslint-comments/recommended', 'standard', 'standard-jsx', 'prettier'],
  extends: ['standard', 'standard-jsx'],
  globals: {
    __DEV__: true,
    $Dict: true,
    $Diff: true,
    $ElementType: true,
    $Exact: true,
    $Keys: true,
    $PropertyType: true,
    $Shape: true,
  },

  overrides: [
    {
      files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
      rules: {
        'no-console': 'off',
      },
    },
  ],

  parser: 'babel-eslint',
  rules: {
    // disabled because XAPI objects are using camel case
    camelcase: ['off'],

    'react/jsx-handler-names': 'off',

    // disabled because not always relevant, we might reconsider in the future
    //
    // enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
    //
    // example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
    'lines-between-class-members': 'off',

    'no-console': ['error', { allow: ['warn', 'error'] }],
    'comma-dangle': ['error', 'always-multiline'],
    'no-var': 'error',
    'prefer-const': 'error',
  },
}

16 .flowconfig
@@ -1,16 +0,0 @@
[ignore]
<PROJECT_ROOT>/node_modules/.*

[include]

[libs]

[lints]

[options]
esproposal.decorators=ignore
esproposal.optional_chaining=enable
include_warnings=true
module.use_strict=true

[strict]

34 .gitignore (vendored)
@@ -1,37 +1,9 @@
/_book/
/coverage/
/dist/
/node_modules/
/lerna-debug.log
/lerna-debug.log.*

/@vates/*/dist/
/@vates/*/node_modules/
/@xen-orchestra/*/dist/
/@xen-orchestra/*/node_modules/
/packages/*/dist/
/packages/*/node_modules/

/@xen-orchestra/proxy/src/app/mixins/index.mjs

/packages/vhd-cli/src/commands/index.js

/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat

/packages/xo-server/.xo-server.*
/packages/xo-server/src/api/index.mjs
/packages/xo-server/src/xapi/mixins/index.mjs
/packages/xo-server/src/xo-mixins/index.mjs

/packages/xo-server-auth-ldap/ldap.cache.conf

/packages/xo-web/src/common/intl/locales/index.js
/packages/xo-web/src/common/themes/index.js
/src/common/intl/locales/index.js
/src/common/themes/index.js

npm-debug.log
npm-debug.log.*
pnpm-debug.log
pnpm-debug.log.*
yarn-error.log
yarn-error.log.*
.env

10 .npmignore (new file)
@@ -0,0 +1,10 @@
/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

@@ -1,11 +1,4 @@
module.exports = {
  arrowParens: 'avoid',
  jsxSingleQuote: true,
  semi: false,
  singleQuote: true,

  // 2020-11-24: Requested by nraynaud and approved by the rest of the team
  //
  // https://team.vates.fr/vates/pl/a1i8af1b9id7pgzm3jcg4toacy
  printWidth: 120,
}

22 .travis.yml
@@ -1,23 +1,11 @@
language: node_js
node_js:
  - 14
  - '6'
  #- '4' # npm 3's flat tree is needed because some packages do not
  #      declare their deps correctly (e.g. chartist-plugin-tooltip)

cache: yarn

# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
sudo: false
addons:
  apt:
    packages:
      - qemu-utils
      - blktap-utils
      - vmdk-stream-converter

before_install:
  - curl -o- -L https://yarnpkg.com/install.sh | bash
  - export PATH="$HOME/.yarn/bin:$PATH"

cache:
  yarn: true

script:
  - yarn run travis-tests

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,46 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/coalesce-calls

[](https://npmjs.org/package/@vates/coalesce-calls)  [](https://bundlephobia.com/result?p=@vates/coalesce-calls) [](https://npmjs.org/package/@vates/coalesce-calls)

> Wraps an async function so that concurrent calls will be coalesced

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-calls):

```
> npm install --save @vates/coalesce-calls
```

## Usage

```js
import { coalesceCalls } from '@vates/coalesce-calls'

const connect = coalesceCalls(async function () {
  // async operation
})

connect()

// the previous promise result will be returned if the operation is not
// complete yet
connect()
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,13 +0,0 @@
```js
import { coalesceCalls } from '@vates/coalesce-calls'

const connect = coalesceCalls(async function () {
  // async operation
})

connect()

// the previous promise result will be returned if the operation is not
// complete yet
connect()
```

@@ -1,14 +0,0 @@
exports.coalesceCalls = function (fn) {
  let promise
  const clean = () => {
    promise = undefined
  }
  return function () {
    if (promise !== undefined) {
      return promise
    }
    promise = fn.apply(this, arguments)
    promise.then(clean, clean)
    return promise
  }
}

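Worth noting in the implementation above: the cached promise is cleared on both fulfillment and rejection (`promise.then(clean, clean)`), so a failed call is not cached and the next call starts a fresh operation. A minimal sketch of that behavior (the flaky `connect` function is hypothetical, for illustration only):

```js
import { coalesceCalls } from '@vates/coalesce-calls'

let attempts = 0
const connect = coalesceCalls(async function () {
  await new Promise(resolve => setTimeout(resolve, 10))
  if (attempts++ === 0) {
    throw new Error('first attempt fails')
  }
  return 'connected'
})

;(async () => {
  const p1 = connect()
  const p2 = connect() // coalesced: same promise as p1, rejects with the same error
  await Promise.all([p1.catch(() => {}), p2.catch(() => {})])

  // the rejected promise was cleared by `clean`, so this call is a fresh attempt
  console.log(await connect()) // → 'connected'
})()
```
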
@@ -1,33 +0,0 @@
/* eslint-env jest */

const { coalesceCalls } = require('./')

const pDefer = () => {
  const r = {}
  r.promise = new Promise((resolve, reject) => {
    r.reject = reject
    r.resolve = resolve
  })
  return r
}

describe('coalesceCalls', () => {
  it('decorates an async function', async () => {
    const fn = coalesceCalls(promise => promise)

    const defer1 = pDefer()
    const promise1 = fn(defer1.promise)
    const defer2 = pDefer()
    const promise2 = fn(defer2.promise)

    defer1.resolve('foo')
    expect(await promise1).toBe('foo')
    expect(await promise2).toBe('foo')

    const defer3 = pDefer()
    const promise3 = fn(defer3.promise)

    defer3.resolve('bar')
    expect(await promise3).toBe('bar')
  })
})

@@ -1,35 +0,0 @@
{
  "private": false,
  "name": "@vates/coalesce-calls",
  "description": "Wraps an async function so that concurrent calls will be coalesced",
  "keywords": [
    "async",
    "calls",
    "coalesce",
    "decorate",
    "decorator",
    "merge",
    "promise",
    "wrap",
    "wrapper"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/coalesce-calls",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/coalesce-calls",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,81 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/compose

[](https://npmjs.org/package/@vates/compose)  [](https://bundlephobia.com/result?p=@vates/compose) [](https://npmjs.org/package/@vates/compose)

> Compose functions from left to right

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/compose):

```
> npm install --save @vates/compose
```

## Usage

```js
import { compose } from '@vates/compose'

const add2 = x => x + 2
const mul3 = x => x * 3

// const f = x => mul3(add2(x))
const f = compose(add2, mul3)

console.log(f(5))
// → 21
```

> The call context (`this`) of the composed function is forwarded to all functions.

The first function is called with all arguments of the composed function:

```js
const add = (x, y) => x + y
const mul3 = x => x * 3

// const f = (x, y) => mul3(add(x, y))
const f = compose(add, mul3)

console.log(f(4, 5))
// → 27
```

Functions may also be passed in an array:

```js
const f = compose([add2, mul3])
```

Options can be passed as the first parameter:

```js
const f = compose(
  {
    // compose async functions
    async: true,

    // compose from right to left
    right: true,
  },
  [add2, mul3]
)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,48 +0,0 @@
```js
import { compose } from '@vates/compose'

const add2 = x => x + 2
const mul3 = x => x * 3

// const f = x => mul3(add2(x))
const f = compose(add2, mul3)

console.log(f(5))
// → 21
```

> The call context (`this`) of the composed function is forwarded to all functions.

The first function is called with all arguments of the composed function:

```js
const add = (x, y) => x + y
const mul3 = x => x * 3

// const f = (x, y) => mul3(add(x, y))
const f = compose(add, mul3)

console.log(f(4, 5))
// → 27
```

Functions may also be passed in an array:

```js
const f = compose([add2, mul3])
```

Options can be passed as the first parameter:

```js
const f = compose(
  {
    // compose async functions
    async: true,

    // compose from right to left
    right: true,
  },
  [add2, mul3]
)
```

@@ -1,46 +0,0 @@
'use strict'

const defaultOpts = { async: false, right: false }

exports.compose = function compose(opts, fns) {
  if (Array.isArray(opts)) {
    fns = opts
    opts = defaultOpts
  } else if (typeof opts === 'object') {
    opts = Object.assign({}, defaultOpts, opts)
    if (!Array.isArray(fns)) {
      fns = Array.prototype.slice.call(arguments, 1)
    }
  } else {
    fns = Array.from(arguments)
    opts = defaultOpts
  }

  const n = fns.length
  if (n === 0) {
    throw new TypeError('at least one function must be passed')
  }
  if (n === 1) {
    return fns[0]
  }

  if (opts.right) {
    fns.reverse()
  }

  return opts.async
    ? async function () {
        let value = await fns[0].apply(this, arguments)
        for (let i = 1; i < n; ++i) {
          value = await fns[i].call(this, value)
        }
        return value
      }
    : function () {
        let value = fns[0].apply(this, arguments)
        for (let i = 1; i < n; ++i) {
          value = fns[i].call(this, value)
        }
        return value
      }
}

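One subtlety in the implementation above: with `right: true`, the functions are reversed in place via `Array#reverse`, so an array supplied by the caller is mutated. A small sketch of the observable effect (variable names are illustrative):

```js
import { compose } from '@vates/compose'

const add2 = x => x + 2
const mul3 = x => x * 3

const fns = [add2, mul3]
const f = compose({ right: true }, fns)

console.log(f(5)) // → 17, i.e. add2(mul3(5))
console.log(fns[0] === mul3) // → true: the caller's array was reversed in place
```
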
@@ -1,66 +0,0 @@
/* eslint-env jest */

const { compose } = require('./')

const add2 = x => x + 2
const mul3 = x => x * 3

describe('compose()', () => {
  it('throws when no function is passed', () => {
    expect(() => compose()).toThrow(TypeError)
    expect(() => compose([])).toThrow(TypeError)
  })

  it('applies from left to right', () => {
    expect(compose(add2, mul3)(5)).toBe(21)
  })

  it('accepts functions in an array', () => {
    expect(compose([add2, mul3])(5)).toBe(21)
  })

  it('can apply from right to left', () => {
    expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
  })

  it('accepts options with functions in an array', () => {
    expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
  })

  it('can compose async functions', async () => {
    expect(
      await compose(
        { async: true },
        async x => x + 2,
        async x => x * 3
      )(5)
    ).toBe(21)
  })

  it('forwards all args to first function', () => {
    expect.assertions(1)

    const expectedArgs = [Math.random(), Math.random()]
    compose(
      (...args) => {
        expect(args).toEqual(expectedArgs)
      },
      // add a second function to avoid the one function special case
      Function.prototype
    )(...expectedArgs)
  })

  it('forwards context to all functions', () => {
    expect.assertions(2)

    const expectedThis = {}
    compose(
      function () {
        expect(this).toBe(expectedThis)
      },
      function () {
        expect(this).toBe(expectedThis)
      }
    ).call(expectedThis)
  })
})

@@ -1,24 +0,0 @@
{
  "private": false,
  "name": "@vates/compose",
  "description": "Compose functions from left to right",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/compose",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/compose",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "2.0.0",
  "engines": {
    "node": ">=7.6"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,45 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/decorate-with

[](https://npmjs.org/package/@vates/decorate-with)  [](https://bundlephobia.com/result?p=@vates/decorate-with) [](https://npmjs.org/package/@vates/decorate-with)

> Creates a decorator from a function wrapper

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/decorate-with):

```
> npm install --save @vates/decorate-with
```

## Usage

For instance, it allows using Lodash's functions as decorators:

```js
import { decorateWith } from '@vates/decorate-with'

class Foo {
  @decorateWith(lodash.debounce, 150)
  bar() {
    // body
  }
}
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,12 +0,0 @@
For instance, it allows using Lodash's functions as decorators:

```js
import { decorateWith } from '@vates/decorate-with'

class Foo {
  @decorateWith(lodash.debounce, 150)
  bar() {
    // body
  }
}
```

@@ -1,4 +0,0 @@
exports.decorateWith = (fn, ...args) => (target, name, descriptor) => ({
  ...descriptor,
  value: fn(descriptor.value, ...args),
})

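The implementation targets the legacy (Babel stage-1) decorator API, where a decorator receives `(target, name, descriptor)` and returns a new property descriptor whose `value` is the wrapped method. Without decorator syntax, the same wrapping can be done by hand; the sketch below is roughly what `@decorateWith(lodash.debounce, 150)` amounts to (an illustration, not the exact transpiler output):

```js
import lodash from 'lodash'

class Foo {
  bar() {
    // body
  }
}

// equivalent to decorating `bar` with @decorateWith(lodash.debounce, 150)
Object.defineProperty(Foo.prototype, 'bar', {
  ...Object.getOwnPropertyDescriptor(Foo.prototype, 'bar'),
  value: lodash.debounce(Foo.prototype.bar, 150),
})
```
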
@@ -1,30 +0,0 @@
{
  "private": false,
  "name": "@vates/decorate-with",
  "description": "Creates a decorator from a function wrapper",
  "keywords": [
    "apply",
    "decorator",
    "factory",
    "wrapper"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/decorate-with",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/decorate-with",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.0.1",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,89 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/disposable

[](https://npmjs.org/package/@vates/disposable)  [](https://bundlephobia.com/result?p=@vates/disposable) [](https://npmjs.org/package/@vates/disposable)

> Utilities for disposables

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/disposable):

```
> npm install --save @vates/disposable
```

## Usage

This library contains utilities for disposables as defined by the [`promise-toolbox` library](https://github.com/JsCommunity/promise-toolbox#resource-management).

### `deduped(fn, keyFn)`

Creates a new function that wraps `fn` and, instead of creating a new disposable at each call, returns copies of the same one when `keyFn` returns the same keys.

Those copies contain the same value and can be disposed independently; the source disposable will only be disposed when all copies are disposed.

`keyFn` is called with the same context and arguments as the wrapping function and must return an array of keys, which will be used to identify which disposables should be grouped together.

```js
import { deduped } from '@vates/disposable/deduped'

// the connection to the given host is established once, at the first call,
// then shared with subsequent calls
const getConnection = deduped(async function (host) {
  const connection = new Connection(host)
  return new Disposable(connection, () => connection.close())
}, host => [host])
```

### `debounceResource(disposable, delay)`

Creates a new disposable with the same value and with a delayed disposer.

When this disposer is called, the source disposable will only be disposed after `delay` has passed.

```js
import { createDebounceResource } from '@vates/disposable/debounceResource'

const debounceResource = createDebounceResource()

// it will wait for 10 seconds before calling the disposer
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
```

### `debounceResource.flushAll()`

Disposes all delayed disposables immediately and cancels the delaying of the ones that are still in use.

```js
import { createDebounceResource } from '@vates/disposable/debounceResource'

const debounceResource = createDebounceResource()

const res1 = await debounceResource(res, 10e3)
const res2 = await debounceResource(res, 10e3)
const res3 = await debounceResource(res, 10e3)

res1.dispose()
res2.dispose()
// res3 is still in use

debounceResource.flushAll()
// res1 and res2 are immediately disposed
// res3 will be disposed immediately when its disposer is called
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,56 +0,0 @@
This library contains utilities for disposables as defined by the [`promise-toolbox` library](https://github.com/JsCommunity/promise-toolbox#resource-management).

### `deduped(fn, keyFn)`

Creates a new function that wraps `fn` and, instead of creating a new disposable at each call, returns copies of the same one when `keyFn` returns the same keys.

Those copies contain the same value and can be disposed independently; the source disposable will only be disposed when all copies are disposed.

`keyFn` is called with the same context and arguments as the wrapping function and must return an array of keys, which will be used to identify which disposables should be grouped together.

```js
import { deduped } from '@vates/disposable/deduped'

// the connection to the given host is established once, at the first call,
// then shared with subsequent calls
const getConnection = deduped(async function (host) {
  const connection = new Connection(host)
  return new Disposable(connection, () => connection.close())
}, host => [host])
```

### `debounceResource(disposable, delay)`

Creates a new disposable with the same value and with a delayed disposer.

When this disposer is called, the source disposable will only be disposed after `delay` has passed.

```js
import { createDebounceResource } from '@vates/disposable/debounceResource'

const debounceResource = createDebounceResource()

// it will wait for 10 seconds before calling the disposer
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
```

### `debounceResource.flushAll()`

Disposes all delayed disposables immediately and cancels the delaying of the ones that are still in use.

```js
import { createDebounceResource } from '@vates/disposable/debounceResource'

const debounceResource = createDebounceResource()

const res1 = await debounceResource(res, 10e3)
const res2 = await debounceResource(res, 10e3)
const res3 = await debounceResource(res, 10e3)

res1.dispose()
res2.dispose()
// res3 is still in use

debounceResource.flushAll()
// res1 and res2 are immediately disposed
// res3 will be disposed immediately when its disposer is called
```

@@ -1,56 +0,0 @@
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')

const { warn } = createLogger('vates:disposable:debounceResource')

exports.createDebounceResource = () => {
  const flushers = new Set()
  async function debounceResource(pDisposable, delay = debounceResource.defaultDelay) {
    if (delay === 0) {
      return pDisposable
    }

    const disposable = await pDisposable

    let timeoutId
    const disposeWrapper = async () => {
      if (timeoutId !== undefined) {
        clearTimeout(timeoutId)
        timeoutId = undefined
        flushers.delete(flusher)

        try {
          await disposable.dispose()
        } catch (error) {
          warn(error)
        }
      }
    }

    const flusher = () => {
      const shouldDisposeNow = timeoutId !== undefined
      if (shouldDisposeNow) {
        return disposeWrapper()
      } else {
        // will dispose ASAP
        delay = 0
      }
    }
    flushers.add(flusher)

    return {
      dispose() {
        timeoutId = setTimeout(disposeWrapper, delay)
      },
      value: disposable.value,
    }
  }
  debounceResource.flushAll = () => {
    // iterate synchronously so that a flusher added while the flushes are
    // running is not cleared without having been called
    const promise = asyncMap(flushers, flush => flush())
    flushers.clear()
    return promise
  }

  return debounceResource
}

@@ -1,29 +0,0 @@
/* eslint-env jest */

const { createDebounceResource } = require('./debounceResource')

jest.useFakeTimers()

describe('debounceResource()', () => {
  it('calls the resource disposer after 10 seconds', async () => {
    const debounceResource = createDebounceResource()
    const delay = 10e3
    const dispose = jest.fn()

    const resource = await debounceResource(
      Promise.resolve({
        value: '',
        dispose,
      }),
      delay
    )

    resource.dispose()

    expect(dispose).not.toBeCalled()

    jest.advanceTimersByTime(delay)

    expect(dispose).toBeCalled()
  })
})

@@ -1,52 +0,0 @@
const ensureArray = require('ensure-array')
const { MultiKeyMap } = require('@vates/multi-key-map')

function State(factory) {
  this.factory = factory
  this.users = 0
}

const call = fn => fn()

exports.deduped = (factory, keyFn = (...args) => args) =>
  (function () {
    const states = new MultiKeyMap()
    return function () {
      const keys = ensureArray(keyFn.apply(this, arguments))
      let state = states.get(keys)
      if (state === undefined) {
        const result = factory.apply(this, arguments)

        const createFactory = disposable => {
          const wrapper = {
            dispose() {
              if (--state.users === 0) {
                states.delete(keys)
                return disposable.dispose()
              }
            },
            value: disposable.value,
          }

          return () => {
            return wrapper
          }
        }

        if (typeof result.then !== 'function') {
          state = new State(createFactory(result))
        } else {
          result.catch(() => {
            states.delete(keys)
          })
          const pFactory = result.then(createFactory)
          state = new State(() => pFactory.then(call))
        }

        states.set(keys, state)
      }

      ++state.users
      return state.factory()
    }
  })()

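The `State` object above reference-counts the copies: `users` is incremented on each acquisition, and the underlying disposable is only disposed when the count drops back to zero, at which point the entry is also removed from the `MultiKeyMap`. A minimal sketch of the resulting behavior (the resource factory is hypothetical, for illustration only):

```js
import { deduped } from '@vates/disposable/deduped'

const getResource = deduped(
  async function (host) {
    console.log('opening', host)
    return { value: `connection to ${host}`, dispose: () => console.log('closing', host) }
  },
  host => [host]
)

;(async () => {
  const a = await getResource('xen1') // logs 'opening xen1'
  const b = await getResource('xen1') // deduped: no new log, same value
  a.dispose() // 2 users → 1: nothing happens
  b.dispose() // 1 user → 0: logs 'closing xen1'
})()
```
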
@@ -1,76 +0,0 @@
/* eslint-env jest */

const { deduped } = require('./deduped')

describe('deduped()', () => {
  it('calls the resource function only once', async () => {
    const value = {}
    const getResource = jest.fn(async () => ({
      value,
      dispose: Function.prototype,
    }))

    const dedupedGetResource = deduped(getResource)

    const { value: v1 } = await dedupedGetResource()
    const { value: v2 } = await dedupedGetResource()

    expect(getResource).toHaveBeenCalledTimes(1)
    expect(v1).toBe(value)
    expect(v2).toBe(value)
  })

  it('only disposes the source disposable when all its copies are disposed', async () => {
    const dispose = jest.fn()
    const getResource = async () => ({
      value: '',
      dispose,
    })

    const dedupedGetResource = deduped(getResource)

    const { dispose: d1 } = await dedupedGetResource()
    const { dispose: d2 } = await dedupedGetResource()

    d1()

    expect(dispose).not.toHaveBeenCalled()

    d2()

    expect(dispose).toHaveBeenCalledTimes(1)
  })

  it('works with sync factory', () => {
    const value = {}
    const dispose = jest.fn()
    const dedupedGetResource = deduped(() => ({ value, dispose }))

    const d1 = dedupedGetResource()
    expect(d1.value).toBe(value)

    const d2 = dedupedGetResource()
    expect(d2.value).toBe(value)

    d1.dispose()

    expect(dispose).not.toHaveBeenCalled()

    d2.dispose()

    expect(dispose).toHaveBeenCalledTimes(1)
  })

  it('no race condition on dispose before async acquisition', async () => {
    const dispose = jest.fn()
    const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))

    const d1 = await dedupedGetResource()

    dedupedGetResource()

    d1.dispose()

    expect(dispose).not.toHaveBeenCalled()
  })
})

@@ -1,30 +0,0 @@
{
  "private": false,
  "name": "@vates/disposable",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/disposable",
  "description": "Utilities for disposables",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/disposable",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "dependencies": {
    "@vates/multi-key-map": "^0.1.0",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.2.0",
    "ensure-array": "^1.0.0"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,53 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/multi-key-map

[](https://npmjs.org/package/@vates/multi-key-map)  [](https://bundlephobia.com/result?p=@vates/multi-key-map) [](https://npmjs.org/package/@vates/multi-key-map)

> Create map with values affected to multiple keys

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):

```
> npm install --save @vates/multi-key-map
```

## Usage

```js
import { MultiKeyMap } from '@vates/multi-key-map'

const map = new MultiKeyMap()

const OBJ = {}
map.set([], 0)
map.set(['foo'], 1)
map.set(['foo', 'bar'], 2)
map.set(['bar', 'foo'], 3)
map.set([OBJ], 4)
map.set([{}], 5)

map.get([]) // 0
map.get(['foo']) // 1
map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,20 +0,0 @@
```js
import { MultiKeyMap } from '@vates/multi-key-map'

const map = new MultiKeyMap()

const OBJ = {}
map.set([], 0)
map.set(['foo'], 1)
map.set(['foo', 'bar'], 2)
map.set(['bar', 'foo'], 3)
map.set([OBJ], 4)
map.set([{}], 5)

map.get([]) // 0
map.get(['foo']) // 1
map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined
```

@@ -1,87 +0,0 @@
class Node {
  constructor(value) {
    this.children = new Map()
    this.value = value
  }
}

function del(node, i, keys) {
  if (i === keys.length) {
    if (node instanceof Node) {
      node.value = undefined
      return node
    }
    return
  }
  if (!(node instanceof Node)) {
    return node
  }
  const key = keys[i]
  const { children } = node
  const child = children.get(key)
  if (child === undefined) {
    return node
  }
  const newChild = del(child, i + 1, keys)
  if (newChild === undefined) {
    if (children.size === 1) {
      return node.value
    }
    children.delete(key)
  } else if (newChild !== child) {
    children.set(key, newChild)
  }
  return node
}

function get(node, i, keys) {
  return i === keys.length
    ? node instanceof Node
      ? node.value
      : node
    : node instanceof Node
    ? get(node.children.get(keys[i]), i + 1, keys)
    : undefined
}

function set(node, i, keys, value) {
  if (i === keys.length) {
    if (node instanceof Node) {
      node.value = value
      return node
    }
    return value
  }
  const key = keys[i]
  if (!(node instanceof Node)) {
    node = new Node(node)
    node.children.set(key, set(undefined, i + 1, keys, value))
  } else {
    const { children } = node
    const child = children.get(key)
    const newChild = set(child, i + 1, keys, value)
    if (newChild !== child) {
      children.set(key, newChild)
    }
  }
  return node
}

exports.MultiKeyMap = class MultiKeyMap {
  constructor() {
    // each node is either a value or a Node if it contains children
    this._root = undefined
  }

  delete(keys) {
    this._root = del(this._root, 0, keys)
  }

  get(keys) {
    return get(this._root, 0, keys)
  }

  set(keys, value) {
    this._root = set(this._root, 0, keys, value)
  }
}

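The map is stored as a trie: an internal `Node` holds a `children` `Map` keyed by the next key component, plus an optional `value` for the key sequence ending there, while a leaf with no children is stored as the bare value itself, which is why the functions distinguish the two cases with `instanceof Node`. A sketch of the internal shape, inferred from the code above and shown as comments:

```js
// internal shape of `_root` after:
//   map.set(['foo'], 1)
//   map.set(['foo', 'bar'], 2)
//
// Node {
//   value: undefined,
//   children: Map { 'foo' => Node { value: 1, children: Map { 'bar' => 2 } } },
// }
```
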
@@ -1,34 +0,0 @@
/* eslint-env jest */

const { MultiKeyMap } = require('./')

describe('MultiKeyMap', () => {
  it('works', () => {
    const map = new MultiKeyMap()

    const keys = [
      // null key
      [],
      // simple key
      ['foo'],
      // composite key
      ['foo', 'bar'],
      // reverse composite key
      ['bar', 'foo'],
    ]
    const values = keys.map(() => ({}))

    // set all values first to make sure they are all stored and not only the
    // last one
    keys.forEach((key, i) => {
      map.set(key, values[i])
    })

    keys.forEach((key, i) => {
      // copy the key to make sure the array itself is not the key
      expect(map.get(key.slice())).toBe(values[i])
      map.delete(key.slice())
      expect(map.get(key.slice())).toBe(undefined)
    })
  })
})

@@ -1,28 +0,0 @@
{
  "private": false,
  "name": "@vates/multi-key-map",
  "description": "Create map with values affected to multiple keys",
  "keywords": [
    "cache",
    "map"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/multi-key-map",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/multi-key-map",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,47 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/parse-duration

[](https://npmjs.org/package/@vates/parse-duration)  [](https://bundlephobia.com/result?p=@vates/parse-duration) [](https://npmjs.org/package/@vates/parse-duration)

> Small wrapper around ms to parse a duration

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/parse-duration):

```
> npm install --save @vates/parse-duration
```

## Usage

`ms` without magic: always parses a duration and throws if invalid.

```js
import { parseDuration } from '@vates/parse-duration'

parseDuration('2 days')
// 172800000

parseDuration(172800000)
// 172800000

parseDuration(undefined)
// throws TypeError('not a valid duration: undefined')
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,14 +0,0 @@
`ms` without magic: always parses a duration and throws if invalid.

```js
import { parseDuration } from '@vates/parse-duration'

parseDuration('2 days')
// 172800000

parseDuration(172800000)
// 172800000

parseDuration(undefined)
// throws TypeError('not a valid duration: undefined')
```

@@ -1,12 +0,0 @@
const ms = require('ms')

exports.parseDuration = value => {
  if (typeof value === 'number') {
    return value
  }
  const duration = ms(value)
  if (duration === undefined) {
    throw new TypeError(`not a valid duration: ${value}`)
  }
  return duration
}

@@ -1,32 +0,0 @@
{
  "private": false,
  "name": "@vates/parse-duration",
  "description": "Small wrapper around ms to parse a duration",
  "keywords": [
    "duration",
    "ms",
    "parse"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/parse-duration",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/parse-duration",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
  "dependencies": {
    "ms": "^2.1.2"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,46 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/read-chunk

[](https://npmjs.org/package/@vates/read-chunk)  [](https://bundlephobia.com/result?p=@vates/read-chunk) [](https://npmjs.org/package/@vates/read-chunk)

> Read a chunk of a Node stream

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):

```
> npm install --save @vates/read-chunk
```

## Usage

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended

```js
import { readChunk } from '@vates/read-chunk'
;(async () => {
  let chunk
  while ((chunk = await readChunk(stream, 1024)) !== null) {
    // do something with chunk
  }
})()
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,13 +0,0 @@
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended

```js
import { readChunk } from '@vates/read-chunk'
;(async () => {
  let chunk
  while ((chunk = await readChunk(stream, 1024)) !== null) {
    // do something with chunk
  }
})()
```

@@ -1,30 +0,0 @@
const readChunk = (stream, size) =>
  size === 0
    ? Promise.resolve(Buffer.alloc(0))
    : new Promise((resolve, reject) => {
        function onEnd() {
          resolve(null)
          removeListeners()
        }
        function onError(error) {
          reject(error)
          removeListeners()
        }
        function onReadable() {
          const data = stream.read(size)
          if (data !== null) {
            resolve(data)
            removeListeners()
          }
        }
        function removeListeners() {
          stream.removeListener('end', onEnd)
          stream.removeListener('error', onError)
          stream.removeListener('readable', onReadable)
        }
        stream.on('end', onEnd)
        stream.on('error', onError)
        stream.on('readable', onReadable)
        onReadable()
      })
exports.readChunk = readChunk

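The promise above settles on whichever of the three events fires first, and all listeners are removed as soon as it does, so repeated calls do not accumulate listeners on the stream. A short usage sketch (the file path is illustrative):

```js
import { createReadStream } from 'fs'
import { readChunk } from '@vates/read-chunk'

;(async () => {
  const stream = createReadStream('some-file.bin') // illustrative path
  // resolves with up to 8 bytes (fewer if the stream ends early), or null if it is empty
  const header = await readChunk(stream, 8)
  console.log(header)
})()
```
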
@@ -1,43 +0,0 @@
/* eslint-env jest */

const { Readable } = require('stream')

const { readChunk } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

describe('readChunk', () => {
  it('returns null if stream is empty', async () => {
    expect(await readChunk(makeStream([]))).toBe(null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
    })

    it('returns an empty buffer if the specified size is 0', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
    })
  })
})

||||
@@ -1,33 +0,0 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@vates/read-chunk",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/read-chunk",
|
||||
"description": "Read a chunk of a Node stream",
|
||||
"license": "ISC",
|
||||
"keywords": [
|
||||
"async",
|
||||
"chunk",
|
||||
"data",
|
||||
"node",
|
||||
"promise",
|
||||
"read",
|
||||
"stream"
|
||||
],
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/read-chunk",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.1.2",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,59 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/toggle-scripts

[](https://npmjs.org/package/@vates/toggle-scripts)  [](https://bundlephobia.com/result?p=@vates/toggle-scripts) [](https://npmjs.org/package/@vates/toggle-scripts)

> Easily enable/disable scripts in package.json

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):

```
> npm install --save @vates/toggle-scripts
```

## Usage

```
Usage: toggle-scripts options...

Easily enable/disable scripts in package.json

Options
  +<script>  Enable the script <script>, ie remove the prefix `_`
  -<script>  Disable the script <script>, ie prefix it with `_`

Examples
  toggle-scripts +postinstall +preuninstall
  toggle-scripts -postinstall -preuninstall
```

For example, if you want the `postinstall` hook only in dev:

```json
// package.json
{
  "scripts": {
    "postinstall": "<some dev only command>",
    "prepublishOnly": "toggle-scripts -postinstall",
    "postpublish": "toggle-scripts +postinstall"
  }
}
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,26 +0,0 @@
```
Usage: toggle-scripts options...

Easily enable/disable scripts in package.json

Options
  +<script>  Enable the script <script>, ie remove the prefix `_`
  -<script>  Disable the script <script>, ie prefix it with `_`

Examples
  toggle-scripts +postinstall +preuninstall
  toggle-scripts -postinstall -preuninstall
```

For example, if you want the `postinstall` hook only in dev:

```json
// package.json
{
  "scripts": {
    "postinstall": "<some dev only command>",
    "prepublishOnly": "toggle-scripts -postinstall",
    "postpublish": "toggle-scripts +postinstall"
  }
}
```

@@ -1,60 +0,0 @@
#!/usr/bin/env node

const fs = require('fs')

const mapKeys = (object, iteratee) => {
  const result = {}
  for (const key of Object.keys(object)) {
    result[iteratee(key, object)] = object[key]
  }
  return result
}

const args = process.argv.slice(2)
if (args.length === 0) {
  const { description, name, version } = require('./package.json')
  const bin = 'toggle-scripts'
  process.stdout.write(`Usage: ${bin} options...

${description}

Options
  +<script>  Enable the script <script>, ie remove the prefix \`_\`
  -<script>  Disable the script <script>, ie prefix it with \`_\`

Examples
  ${bin} +postinstall +preuninstall
  ${bin} -postinstall -preuninstall

${name} v${version}
`)
  process.exit()
}

const plan = { __proto__: null }
for (const arg of args) {
  const action = arg[0]
  const script = arg.slice(1)

  if (action === '+') {
    plan['_' + script] = script
  } else if (action === '-') {
    plan[script] = '_' + script
  } else {
    throw new Error('invalid param: ' + arg)
  }
}

const pkgPath = process.env.npm_package_json || './package.json'
const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'))
pkg.scripts = mapKeys(pkg.scripts, (name, scripts) => {
  const newName = plan[name]
  if (newName === undefined) {
    return name
  }
  if (newName in scripts) {
    throw new Error('script already defined: ' + name)
  }
  return newName
})
fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + '\n')

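Concretely, the `mapKeys` pass above only renames keys: disabling prefixes a script name with `_`, which npm never runs as a lifecycle hook, and enabling strips the prefix again. Before and after running `toggle-scripts -postinstall` on a hypothetical `package.json` (reusing the placeholder command from the README):

```json
{
  "scripts": {
    "postinstall": "<some dev only command>"
  }
}
```

becomes:

```json
{
  "scripts": {
    "_postinstall": "<some dev only command>"
  }
}
```
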
@@ -1,38 +0,0 @@
{
  "private": false,
  "name": "@vates/toggle-scripts",
  "description": "Easily enable/disable scripts in package.json",
  "keywords": [
    "dev",
    "disable",
    "enable",
    "lifecycle",
    "npm",
    "package.json",
    "pinst",
    "postinstall",
    "script",
    "scripts",
    "toggle"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/toggle-scripts",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/toggle-scripts",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "1.0.0",
  "engines": {
    "node": ">=6"
  },
  "bin": "./index.js",
  "scripts": {
    "postversion": "npm publish --access public"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,89 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/async-map

[](https://npmjs.org/package/@xen-orchestra/async-map)  [](https://bundlephobia.com/result?p=@xen-orchestra/async-map) [](https://npmjs.org/package/@xen-orchestra/async-map)

> Promise.all + map for all iterables

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async-map):

```
> npm install --save @xen-orchestra/async-map
```

## Usage

### `asyncMap(iterable, iteratee, thisArg = iterable)`

Similar to `Promise.all + Array#map` for all iterables: calls `iteratee` for each item in `iterable`, and returns a promise of an array containing the awaited result of each call to `iteratee`.

It rejects as soon as the first call to `iteratee` rejects.

```js
import { asyncMap } from '@xen-orchestra/async-map'

const array = await asyncMap(iterable, iteratee, thisArg)
```

It can be used with any iterable (`Array`, `Map`, etc.):

```js
const map = new Map()
map.set('foo', 42)
map.set('bar', 3.14)

const array = await asyncMap(map, async function ([key, value]) {
  // TODO: do async computation
  //
  // the map can be accessed via `this`
})
```

#### Use with plain objects

Plain objects are not iterable, but you can use `Object.keys`, `Object.values` or `Object.entries` to help:

```js
const object = {
  foo: 42,
  bar: 3.14,
}

const array = await asyncMap(
  Object.entries(object),
  async function ([key, value]) {
    // TODO: do async computation
    //
    // the object can be accessed via `this` because it's been passed as third arg
  },
  object
)
```

### `asyncMapSettled(iterable, iteratee, thisArg = iterable)`

Similar to `asyncMap` but waits for all promises to settle before rejecting.

```js
import { asyncMapSettled } from '@xen-orchestra/async-map'

const array = await asyncMapSettled(iterable, iteratee, thisArg)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@@ -1,56 +0,0 @@
### `asyncMap(iterable, iteratee, thisArg = iterable)`

Similar to `Promise.all + Array#map` for all iterables: calls `iteratee` for each item in `iterable`, and returns a promise of an array containing the awaited result of each call to `iteratee`.

It rejects as soon as the first call to `iteratee` rejects.

```js
import { asyncMap } from '@xen-orchestra/async-map'

const array = await asyncMap(iterable, iteratee, thisArg)
```

It can be used with any iterable (`Array`, `Map`, etc.):

```js
const map = new Map()
map.set('foo', 42)
map.set('bar', 3.14)

const array = await asyncMap(map, async function ([key, value]) {
  // TODO: do async computation
  //
  // the map can be accessed via `this`
})
```

#### Use with plain objects

Plain objects are not iterable, but you can use `Object.keys`, `Object.values` or `Object.entries` to help:

```js
const object = {
  foo: 42,
  bar: 3.14,
}

const array = await asyncMap(
  Object.entries(object),
  async function ([key, value]) {
    // TODO: do async computation
    //
    // the object can be accessed via `this` because it's been passed as third arg
  },
  object
)
```

### `asyncMapSettled(iterable, iteratee, thisArg = iterable)`

Similar to `asyncMap` but waits for all promises to settle before rejecting.

```js
import { asyncMapSettled } from '@xen-orchestra/async-map'

const array = await asyncMapSettled(iterable, iteratee, thisArg)
```

@@ -1,71 +0,0 @@
const wrapCall = (fn, arg, thisArg) => {
  try {
    return Promise.resolve(fn.call(thisArg, arg))
  } catch (error) {
    return Promise.reject(error)
  }
}

/**
 * Similar to Promise.all + Array#map but supports all iterables and does not trigger ESLint array-callback-return
 *
 * WARNING: Does not handle plain objects
 *
 * @template Item,This
 * @param {Iterable<Item>} iterable
 * @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
 * @param {This} [thisArg]
 * @returns {Promise<Item[]>}
 */
exports.asyncMap = function asyncMap(iterable, mapFn, thisArg = iterable) {
  return Promise.all(Array.from(iterable, mapFn, thisArg))
}

/**
 * Like `asyncMap` but waits for all promises to settle before rejecting
 *
 * @template Item,This
 * @param {Iterable<Item>} iterable
 * @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
 * @param {This} [thisArg]
 * @returns {Promise<Item[]>}
 */
exports.asyncMapSettled = function asyncMapSettled(iterable, mapFn, thisArg = iterable) {
  return new Promise((resolve, reject) => {
    const onError = e => {
      // only the first error is kept
      if (result !== undefined) {
        error = e
        result = undefined
      }
      if (--n === 0) {
        reject(error)
      }
    }
    const onValue = (i, value) => {
      const hasError = result === undefined
      if (!hasError) {
        result[i] = value
      }
      if (--n === 0) {
        if (hasError) {
          reject(error)
        } else {
          resolve(result)
        }
      }
    }

    let n = 0
    for (const item of iterable) {
      const i = n++
      wrapCall(mapFn, item, thisArg).then(value => onValue(i, value), onError)
    }

    if (n === 0) {
      return resolve([])
    }

    // safe to declare after the loop: the callbacks above only run
    // asynchronously, after these declarations have been evaluated
    let error
    let result = new Array(n)
  })
}

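A side note on the `asyncMap` one-liner above: `Array.from(iterable, mapFn, thisArg)` maps while converting to an array, and its third argument sets `this` for `mapFn`, which is what makes the `thisArg = iterable` default work. A quick illustration:

```js
// Array.from maps each item during conversion; `this` inside mapFn is thisArg
const thisArg = { factor: 2 }
const promises = Array.from(
  new Set([1, 2, 3]),
  function (x) {
    return Promise.resolve(x * this.factor)
  },
  thisArg
)
console.log(await Promise.all(promises)) // [2, 4, 6]
```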
@@ -1,71 +0,0 @@
/* eslint-env jest */

const { asyncMapSettled } = require('./')

const noop = Function.prototype

describe('asyncMapSettled', () => {
  it('works', async () => {
    const values = [Math.random(), Math.random()]
    const spy = jest.fn(async v => v * 2)
    const iterable = new Set(values)

    // returns an array containing the result of each call
    expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))

    for (let i = 0, n = values.length; i < n; ++i) {
      // each call receives the current item as sole argument
      expect(spy.mock.calls[i]).toEqual([values[i]])

      // each call has `this` bound to the iterable
      expect(spy.mock.instances[i]).toBe(iterable)
    }
  })

  it('can use a specified thisArg', () => {
    const thisArg = {}
    const spy = jest.fn()
    asyncMapSettled(['foo'], spy, thisArg)
    expect(spy.mock.instances[0]).toBe(thisArg)
  })

  it('rejects only when all calls have settled', async () => {
    const defers = []
    const promise = asyncMapSettled([1, 2], () => {
      let resolve, reject
      // eslint-disable-next-line promise/param-names
      const promise = new Promise((_resolve, _reject) => {
        resolve = _resolve
        reject = _reject
      })
      defers.push({ promise, resolve, reject })
      return promise
    })

    let hasSettled = false
    promise.catch(noop).then(() => {
      hasSettled = true
    })

    const error = new Error()
    defers[0].reject(error)

    // wait for all microtasks to settle
    await new Promise(resolve => setImmediate(resolve))

    expect(hasSettled).toBe(false)

    defers[1].resolve()

    // wait for all microtasks to settle
    await new Promise(resolve => setImmediate(resolve))

    expect(hasSettled).toBe(true)
    await expect(promise).rejects.toBe(error)
  })

  it('rejects when the last promise rejects', async () => {
    const error = new Error()
    await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
  })
})

@@ -1,45 +0,0 @@
// type MaybePromise<T> = Promise<T> | T
//
// declare export function asyncMap<T1, T2>(
//   collection: MaybePromise<T1[]>,
//   (T1, number) => MaybePromise<T2>
// ): Promise<T2[]>
// declare export function asyncMap<K, V1, V2>(
//   collection: MaybePromise<{ [K]: V1 }>,
//   (V1, K) => MaybePromise<V2>
// ): Promise<V2[]>

const map = require('lodash/map')

/**
 * Similar to map() + Promise.all() but waits for all promises to settle before
 * rejecting (with the first error)
 *
 * @deprecated Does not support iterables, please use the new implementations
 */
module.exports = function asyncMapLegacy(collection, iteratee) {
  let then
  if (collection != null && typeof (then = collection.then) === 'function') {
    return then.call(collection, collection => asyncMapLegacy(collection, iteratee))
  }

  let errorContainer
  const onError = error => {
    if (errorContainer === undefined) {
      errorContainer = { error }
    }
  }

  return Promise.all(
    map(collection, (item, key, collection) =>
      new Promise(resolve => {
        resolve(iteratee(item, key, collection))
      }).catch(onError)
    )
  ).then(values => {
    if (errorContainer !== undefined) {
      throw errorContainer.error
    }
    return values
  })
}

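For contrast with the newer helpers, this legacy version accepts plain objects (through lodash's `map`) and passes the key as second argument; it also unwraps a promised collection. A minimal sketch, assuming the module above is required from a hypothetical `./legacy` path:

```js
const asyncMapLegacy = require('./legacy') // hypothetical path to the file above

// values from a plain object; the iteratee receives (value, key)
const doubled = await asyncMapLegacy({ foo: 1, bar: 2 }, async (value, key) => value * 2)
console.log(doubled) // [2, 4], in the object's key order
```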
@@ -1,36 +0,0 @@
{
  "private": false,
  "name": "@xen-orchestra/async-map",
  "version": "0.1.2",
  "license": "ISC",
  "description": "Promise.all + map for all iterables",
  "keywords": [
    "array",
    "async",
    "iterable",
    "map",
    "settled",
    "typescript"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/async-map",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/async-map",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "lodash": "^4.17.4"
  },
  "scripts": {
    "postversion": "npm publish"
  }
}

@@ -1 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,28 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/audit-core

[](https://npmjs.org/package/@xen-orchestra/audit-core) [](https://bundlephobia.com/result?p=@xen-orchestra/audit-core) [](https://npmjs.org/package/@xen-orchestra/audit-core)

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/audit-core):

```
> npm install --save @xen-orchestra/audit-core
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

@@ -1,45 +0,0 @@
{
  "name": "@xen-orchestra/audit-core",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/audit-core",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/audit-core",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.2.0",
  "engines": {
    "node": ">=8.10"
  },
  "main": "dist/",
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "postversion": "npm publish --access public",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "devDependencies": {
    "@babel/cli": "^7.7.4",
    "@babel/core": "^7.7.4",
    "@babel/plugin-proposal-decorators": "^7.8.0",
    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.0",
    "@babel/preset-env": "^7.7.4",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@vates/decorate-with": "^0.0.1",
    "@xen-orchestra/log": "^0.2.0",
    "core-js": "^3.6.4",
    "golike-defer": "^0.5.1",
    "object-hash": "^2.0.1"
  },
  "private": false,
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  }
}

@@ -1,194 +0,0 @@
// see https://github.com/babel/babel/issues/8450
import 'core-js/features/symbol/async-iterator'

import assert from 'assert'
import hash from 'object-hash'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'

const log = createLogger('xo:audit-core')

export class Storage {
  constructor() {
    this._lock = Promise.resolve()
  }

  async acquireLock() {
    const lock = this._lock
    let releaseLock
    this._lock = new Promise(resolve => {
      releaseLock = resolve
    })
    await lock
    return releaseLock
  }
}

// Format: $<algorithm>$<salt>$<encrypted>
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
const ID_TO_ALGORITHM = {
  5: 'sha256',
}

export class AlteredRecordError extends Error {
  constructor(id, nValid, record) {
    super('altered record')

    this.id = id
    this.nValid = nValid
    this.record = record
  }
}

export class MissingRecordError extends Error {
  constructor(id, nValid) {
    super('missing record')

    this.id = id
    this.nValid = nValid
  }
}

export const NULL_ID = 'nullId'

const HASH_ALGORITHM_ID = '5'
const createHash = (data, algorithmId = HASH_ALGORITHM_ID) =>
  `$${algorithmId}$$${hash(data, {
    algorithm: ID_TO_ALGORITHM[algorithmId],
    excludeKeys: key => key === 'id',
  })}`

export class AuditCore {
  constructor(storage) {
    assert.notStrictEqual(storage, undefined)
    this._storage = storage
  }

  @decorateWith(defer)
  async add($defer, subject, event, data) {
    const time = Date.now()
    $defer(await this._storage.acquireLock())
    return this._addUnsafe({
      data,
      event,
      subject,
      time,
    })
  }

  async _addUnsafe({ data, event, subject, time }) {
    const storage = this._storage

    // delete "undefined" properties and normalize data with JSON.stringify
    const record = JSON.parse(
      JSON.stringify({
        data,
        event,
        previousId: (await storage.getLastId()) ?? NULL_ID,
        subject,
        time,
      })
    )
    record.id = createHash(record)
    await storage.put(record)
    await storage.setLastId(record.id)
    return record
  }

  async checkIntegrity(oldest, newest) {
    const storage = this._storage

    // handle separated chains case
    if (newest !== (await storage.getLastId())) {
      let isNewestAccessible = false
      for await (const { id } of this.getFrom()) {
        if (id === newest) {
          isNewestAccessible = true
          break
        }
      }
      if (!isNewestAccessible) {
        throw new MissingRecordError(newest, 0)
      }
    }

    let nValid = 0
    while (newest !== oldest) {
      const record = await storage.get(newest)
      if (record === undefined) {
        throw new MissingRecordError(newest, nValid)
      }
      if (newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))) {
        throw new AlteredRecordError(newest, nValid, record)
      }
      newest = record.previousId
      nValid++
    }
    return nValid
  }

  async *getFrom(newest) {
    const storage = this._storage

    let id = newest ?? (await storage.getLastId())
    if (id === undefined) {
      return
    }

    let record
    while ((record = await storage.get(id)) !== undefined) {
      yield record
      id = record.previousId
    }
  }

  async deleteFrom(newest) {
    assert.notStrictEqual(newest, undefined)
    for await (const { id } of this.getFrom(newest)) {
      await this._storage.del(id)
    }
  }

  @decorateWith(defer)
  async deleteRangeAndRewrite($defer, newest, oldest) {
    assert.notStrictEqual(newest, undefined)
    assert.notStrictEqual(oldest, undefined)

    const storage = this._storage
    $defer(await storage.acquireLock())

    assert.notStrictEqual(await storage.get(newest), undefined)
    const oldestRecord = await storage.get(oldest)
    assert.notStrictEqual(oldestRecord, undefined)

    const lastId = await storage.getLastId()
    const recentRecords = []
    for await (const record of this.getFrom(lastId)) {
      if (record.id === newest) {
        break
      }

      recentRecords.push(record)
    }

    for await (const record of this.getFrom(newest)) {
      await storage.del(record.id)
      if (record.id === oldest) {
        break
      }
    }

    await storage.setLastId(oldestRecord.previousId)

    for (const record of recentRecords) {
      try {
        await this._addUnsafe(record)
        await storage.del(record.id)
      } catch (error) {
        log.error(error)
      }
    }
  }
}

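Records form a hash chain: each id is a hash of the record contents, `previousId` included, so altering or removing any record invalidates the chain after it. A minimal usage sketch, assuming an in-memory `Storage` subclass much like the `DB` test double in the spec below:

```js
import { AuditCore, NULL_ID, Storage } from '@xen-orchestra/audit-core'

// hypothetical minimal storage, kept in memory for illustration
class MemoryStorage extends Storage {
  _db = new Map()
  _lastId = undefined
  async put(record) { this._db.set(record.id, record) }
  async get(id) { return this._db.get(id) }
  async del(id) { this._db.delete(id) }
  async getLastId() { return this._lastId }
  async setLastId(id) { this._lastId = id }
}

const audit = new AuditCore(new MemoryStorage())
const record = await audit.add({ name: 'admin' }, 'signIn', { ip: '127.0.0.1' })

// walks the chain from the newest record back to NULL_ID,
// re-hashing each record and returning the number of valid ones
console.log(await audit.checkIntegrity(NULL_ID, record.id)) // 1
```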
@@ -1,115 +0,0 @@
/* eslint-env jest */

import { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } from '.'

const asyncIteratorToArray = async asyncIterator => {
  const array = []
  for await (const entry of asyncIterator) {
    array.push(entry)
  }
  return array
}

class DB extends Storage {
  constructor() {
    super()

    this._db = new Map()
    this._lastId = undefined
  }

  async put(record) {
    this._db.set(record.id, record)
  }

  async setLastId(id) {
    this._lastId = id
  }

  async getLastId() {
    return this._lastId
  }

  async del(id) {
    this._db.delete(id)
  }

  async get(id) {
    return this._db.get(id)
  }

  _clear() {
    return this._db.clear()
  }
}

const DATA = [
  [
    {
      name: 'subject0',
    },
    'event0',
    {},
  ],
  [
    {
      name: 'subject1',
    },
    'event1',
    {},
  ],
  [
    {
      name: 'subject2',
    },
    'event2',
    {},
  ],
]

const db = new DB()
const auditCore = new AuditCore(db)
const storeAuditRecords = async () => {
  await Promise.all(DATA.map(data => auditCore.add(...data)))
  const records = await asyncIteratorToArray(auditCore.getFrom())
  expect(records.length).toBe(DATA.length)
  return records
}

describe('auditCore', () => {
  afterEach(() => db._clear())

  it('detects that a record is missing', async () => {
    const [newestRecord, deletedRecord] = await storeAuditRecords()

    const nValidRecords = await auditCore.checkIntegrity(NULL_ID, newestRecord.id)
    expect(nValidRecords).toBe(DATA.length)

    await db.del(deletedRecord.id)
    await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
      new MissingRecordError(deletedRecord.id, 1)
    )
  })

  it('detects that a record has been altered', async () => {
    const [newestRecord, alteredRecord] = await storeAuditRecords()

    alteredRecord.event = ''
    await db.put(alteredRecord)

    await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
      new AlteredRecordError(alteredRecord.id, 1, alteredRecord)
    )
  })

  it('confirms interval integrity after deletion of records outside of the interval', async () => {
    const [thirdRecord, secondRecord, firstRecord] = await storeAuditRecords()

    await auditCore.deleteFrom(secondRecord.id)

    expect(await db.get(firstRecord.id)).toBe(undefined)
    expect(await db.get(secondRecord.id)).toBe(undefined)

    await auditCore.checkIntegrity(secondRecord.id, thirdRecord.id)
  })
})

@@ -1,26 +0,0 @@
class Storage {
  acquire: () => Promise<() => undefined>
  del: (id: string) => Promise<void>
  get: (id: string) => Promise<Record | void>
  getLastId: () => Promise<string | void>
  put: (record: Record) => Promise<void>
  setLastId: (id: string) => Promise<void>
}

interface Record {
  data: object
  event: string
  id: string
  previousId: string
  subject: object
  time: number
}

export class AuditCore {
  constructor(storage: Storage) {}
  public add(subject: any, event: string, data: any): Promise<Record> {}
  public checkIntegrity(oldest: string, newest: string): Promise<number> {}
  public getFrom(newest?: string): AsyncIterator {}
  public deleteFrom(newest: string): Promise<void> {}
  public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> {}
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,18 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/babel-config

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

@@ -1,69 +0,0 @@
'use strict'

const PLUGINS_RE = /^(?:@babel\/|babel-)plugin-.+$/
const PRESETS_RE = /^@babel\/preset-.+$/

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const configs = {
  '@babel/plugin-proposal-decorators': {
    legacy: true,
  },
  '@babel/plugin-proposal-pipeline-operator': {
    proposal: 'minimal',
  },
  '@babel/preset-env': {
    debug: !__TEST__,

    // disabled until https://github.com/babel/babel/issues/8323 is resolved
    // loose: true,

    shippedProposals: true,
  },
}

const getConfig = (key, ...args) => {
  const config = configs[key]
  return config === undefined ? {} : typeof config === 'function' ? config(...args) : config
}

// some plugins must be used in a specific order
const pluginsOrder = ['@babel/plugin-proposal-decorators', '@babel/plugin-proposal-class-properties']

module.exports = function (pkg, configs = {}) {
  const plugins = {}
  const presets = {}

  Object.keys(pkg.devDependencies || {}).forEach(name => {
    if (!(name in presets) && PLUGINS_RE.test(name)) {
      plugins[name] = { ...getConfig(name, pkg), ...configs[name] }
    } else if (!(name in presets) && PRESETS_RE.test(name)) {
      presets[name] = { ...getConfig(name, pkg), ...configs[name] }
    }
  })

  return {
    comments: !__PROD__,
    ignore: __PROD__ ? [/\.spec\.js$/] : undefined,
    plugins: Object.keys(plugins)
      .map(plugin => [plugin, plugins[plugin]])
      .sort(([a], [b]) => {
        const oA = pluginsOrder.indexOf(a)
        const oB = pluginsOrder.indexOf(b)
        return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
      }),
    presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
    targets: (() => {
      let node = (pkg.engines || {}).node
      if (node !== undefined) {
        const trimChars = '^=>~'
        while (trimChars.includes(node[0])) {
          node = node.slice(1)
        }
      }
      return { browsers: pkg.browserslist, node }
    })(),
  }
}

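A package opts in by listing the Babel plugins and presets it wants in its `devDependencies` and forwarding its `package.json`, as the one-line `babel.config.js` earlier in this diff does. A hedged sketch of a per-package override through the second parameter, which is merged over the shared config as shown above:

```js
// babel.config.js of a package in the monorepo (illustrative)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'), {
  // per-package override, merged over the shared '@babel/preset-env' entry
  '@babel/preset-env': { debug: false },
})
```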
@@ -1,20 +0,0 @@
{
  "private": true,
  "name": "@xen-orchestra/babel-config",
  "version": "0.0.0",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/babel-config",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "engines": {
    "node": ">=6"
  },
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,48 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/backups-cli

[](https://npmjs.org/package/@xen-orchestra/backups-cli) [](https://bundlephobia.com/result?p=@xen-orchestra/backups-cli) [](https://npmjs.org/package/@xen-orchestra/backups-cli)

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups-cli):

```
> npm install --global @xen-orchestra/backups-cli
```

## Usage

```
> xo-backups --help
Usage:

    xo-backups clean-vms [--merge] [--remove] xo-vm-backups/*

      Detects and repairs issues with VM backups.

      Options:
        -m, --merge    Merge (or continue merging) VHD files that are unused
        -r, --remove   Remove unused, incomplete, orphan, or corrupted files

    xo-backups create-symlink-index xo-vm-backups <field path>

    xo-backups info xo-vm-backups/*
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

@@ -1,17 +0,0 @@
```
> xo-backups --help
Usage:

    xo-backups clean-vms [--merge] [--remove] xo-vm-backups/*

      Detects and repairs issues with VM backups.

      Options:
        -m, --merge    Merge (or continue merging) VHD files that are unused
        -r, --remove   Remove unused, incomplete, orphan, or corrupted files

    xo-backups create-symlink-index xo-vm-backups <field path>

    xo-backups info xo-vm-backups/*
```

@@ -1,32 +0,0 @@
const getopts = require('getopts')

const { version } = require('./package.json')

module.exports = commands =>
  async function (args, prefix) {
    const opts = getopts(args, {
      alias: {
        help: 'h',
      },
      boolean: ['help'],
      stopEarly: true,
    })

    const commandName = opts.help || args.length === 0 ? 'help' : args[0]
    const command = commands[commandName]
    if (command === undefined) {
      process.stdout.write(`Usage:

${Object.keys(commands)
  .filter(command => command !== 'help')
  .map(command => `    ${prefix} ${command} ${commands[command].usage || ''}`)
  .join('\n\n')}

xo-backups v${version}
`)
      process.exitCode = commandName === 'help' ? 0 : 1
      return
    }

    return command.main(args.slice(1), prefix + ' ' + commandName)
  }

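A sketch of the shape this factory expects, mirroring the real `index.js` further down in this diff; the `greet` command here is hypothetical:

```js
const composeCommands = require('./_composeCommands')

const run = composeCommands({
  greet: {
    // main receives the remaining args and the command prefix
    main: async args => console.log('hello', ...args),
    usage: '<name>',
  },
})

// unknown commands (or --help) print the generated usage text
run(process.argv.slice(2), 'my-cli').catch(error => {
  console.error(error)
  process.exitCode = 1
})
```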
@@ -1,69 +0,0 @@
const { dirname } = require('path')

const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs

fs.getSize = path =>
  fs.stat(path).then(
    _ => _.size,
    error => {
      if (error.code === 'ENOENT') {
        return 0
      }
      throw error
    }
  )

fs.mktree = async function mkdirp(path) {
  try {
    await fs.mkdir(path)
  } catch (error) {
    const { code } = error
    if (code === 'EEXIST') {
      await fs.readdir(path)
      return
    }
    if (code === 'ENOENT') {
      await mkdirp(dirname(path))
      return mkdirp(path)
    }
    throw error
  }
}

// - easier:
//   - single param for direct use in `Array#map`
//   - files are prefixed with directory path
// - safer: returns empty array if path is missing or not a directory
fs.readdir2 = path =>
  fs.readdir(path).then(
    entries => {
      entries.forEach((entry, i) => {
        entries[i] = `${path}/${entry}`
      })

      return entries
    },
    error => {
      const { code } = error
      if (code === 'ENOENT') {
        // do nothing
      } else if (code === 'ENOTDIR') {
        console.warn('WARN: readdir(%s)', path, error)
      } else {
        throw error
      }
      return []
    }
  )

fs.symlink2 = async (target, path) => {
  try {
    await fs.symlink(target, path)
  } catch (error) {
    if (error.code === 'EEXIST' && (await fs.readlink(path)) === target) {
      return
    }
    throw error
  }
}

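A brief sketch of how these helpers compose in the commands below; all paths are illustrative:

```js
const { getSize, mktree, readdir2, symlink2 } = require('./_fs')

// readdir2 returns entries prefixed with the directory path,
// and an empty array when the directory is missing
const jsonFiles = (await readdir2('xo-vm-backups/some-vm')).filter(_ => _.endsWith('.json'))

await mktree('xo-vm-backups/indexes/jobId') // recursive mkdir
await symlink2('../some-vm', 'xo-vm-backups/indexes/jobId/backup-1') // idempotent symlink
console.log(await getSize('missing-file')) // 0 when the file does not exist
```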
@@ -1,33 +0,0 @@
#!/usr/bin/env node

// -----------------------------------------------------------------------------

const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const getopts = require('getopts')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
const { resolve } = require('path')

const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))

module.exports = async function main(args) {
  const { _, remove, merge } = getopts(args, {
    alias: {
      remove: 'r',
      merge: 'm',
    },
    boolean: ['merge', 'remove'],
    default: {
      merge: false,
      remove: false,
    },
  })

  await asyncMap(_, async vmDir => {
    vmDir = resolve(vmDir)
    try {
      await adapter.cleanVm(vmDir, { remove, merge, onLog: log => console.warn(log) })
    } catch (error) {
      console.error('adapter.cleanVm', vmDir, error)
    }
  })
}

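A side note on the `curryRight` wrapper: it lets `asyncMap` be partially applied with the iteratee first, while a full call like the one above still works unchanged. An illustration with hypothetical values:

```js
const curryRight = require('lodash/curryRight')
const { asyncMap } = require('@xen-orchestra/async-map')

const asyncMapC = curryRight(asyncMap)
const double = asyncMapC(async n => n * 2) // iteratee applied first…
console.log(await double([1, 2, 3])) // …collection later: [2, 4, 6]
```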
@@ -1,28 +0,0 @@
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')

const { mktree, readdir2, readFile, symlink2 } = require('../_fs')

module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
  const indexDir = join(backupDir, 'indexes', filenamify(fieldPath))
  await mktree(indexDir)

  await asyncMap(await readdir2(backupDir), async vmDir =>
    asyncMap(
      (await readdir2(vmDir)).filter(_ => _.endsWith('.json')),
      async json => {
        const metadata = JSON.parse(await readFile(json))
        const value = get(metadata, fieldPath)
        if (value !== undefined) {
          const target = relative(indexDir, dirname(json))
          const path = join(indexDir, filenamify(String(value)))
          await symlink2(target, path).catch(error => {
            console.warn('symlink(%s, %s)', target, path, error)
          })
        }
      }
    )
  )
}

@@ -1,54 +0,0 @@
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')

const { readdir2, readFile, getSize } = require('../_fs')

const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)

module.exports = async function info(vmDirs) {
  const jsonFiles = (
    await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
  ).flat()

  const hashes = { __proto__: null }

  const info = (
    await asyncMap(jsonFiles, async jsonFile => {
      try {
        const jsonDir = dirname(jsonFile)
        const json = await readFile(jsonFile)

        const hash = sha512(json)
        if (hash in hashes) {
          console.log(jsonFile, 'duplicate of', hashes[hash])
          return
        }
        hashes[hash] = jsonFile

        const metadata = JSON.parse(json)

        return {
          jsonDir,
          jsonFile,
          metadata,
          size:
            json.length +
            (await (metadata.mode === 'delta'
              ? asyncMap(Object.values(metadata.vhds), _ => getSize(resolve(jsonDir, _))).then(sum)
              : getSize(resolve(jsonDir, metadata.xva)))),
        }
      } catch (error) {
        console.error(jsonFile, error)
      }
    })
  ).filter(_ => _ !== undefined)
  const byJobs = groupBy(info, 'metadata.jobId')
  Object.keys(byJobs)
    .sort()
    .forEach(jobId => {
      console.log(jobId, sum(byJobs[jobId].map(_ => _.size)))
    })
}

@@ -1,32 +0,0 @@
#!/usr/bin/env node

require('./_composeCommands')({
  'clean-vms': {
    get main() {
      return require('./commands/clean-vms')
    },
    usage: `[--merge] [--remove] xo-vm-backups/*

    Detects and repairs issues with VM backups.

    Options:
      -m, --merge    Merge (or continue merging) VHD files that are unused
      -r, --remove   Remove unused, incomplete, orphan, or corrupted files
`,
  },
  'create-symlink-index': {
    get main() {
      return require('./commands/create-symlink-index')
    },
    usage: 'xo-vm-backups <field path>',
  },
  info: {
    get main() {
      return require('./commands/info')
    },
    usage: 'xo-vm-backups/*',
  },
})(process.argv.slice(2), 'xo-backups').catch(error => {
  console.error('main', error)
  process.exitCode = 1
})

@@ -1,36 +0,0 @@
{
  "private": false,
  "bin": {
    "xo-backups": "index.js"
  },
  "preferGlobal": true,
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.11.0",
    "@xen-orchestra/fs": "^0.17.0",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.19.2"
  },
  "engines": {
    "node": ">=7.10.1"
  },
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
  "name": "@xen-orchestra/backups-cli",
  "repository": {
    "directory": "@xen-orchestra/backups-cli",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.6.0",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  }
}

@@ -1 +0,0 @@
../../scripts/npmignore

@@ -1,263 +0,0 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')

const noop = Function.prototype

const getAdaptersByRemote = adapters => {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}

const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

exports.Backup = class Backup {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getAdapter = Disposable.factory(function* (remoteId) {
      return {
        adapter: yield getAdapter(remoteId),
        remoteId,
      }
    })

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })
  }

  run() {
    const type = this._job.type
    if (type === 'backup') {
      return this._runVmBackup()
    } else if (type === 'metadataBackup') {
      return this._runMetadataBackup()
    } else {
      throw new Error(`No runner for the backup type ${type}`)
    }
  }

  async _runMetadataBackup() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const settings = {
      ...config.defaultSettings,
      ...config.metadata.defaultSettings,
      ...job.settings[''],
      ...job.settings[schedule.id],
    }

    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(
        remoteIds.map(id =>
          this._getAdapter(id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }

  async _runVmBackup() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule

    const config = this._config
    const { settings } = job
    const scheduleSettings = {
      ...config.defaultSettings,
      ...config.vm.defaultSettings,
      ...settings[''],
      ...settings[schedule.id],
    }

    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id =>
          this._getAdapter(id).catch(error => {
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      async (srs, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const handleVm = vmUuid =>
          runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
            Disposable.use(this._getRecord('VM', vmUuid), vm =>
              new VmBackup({
                config,
                getSnapshotNameLabel,
                job,
                // remotes,
                remoteAdapters,
                schedule,
                settings: { ...scheduleSettings, ...settings[vmUuid] },
                srs,
                vm,
              }).run()
            )
          )
        const { concurrency } = scheduleSettings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}

@@ -1,40 +0,0 @@
const { asyncMap } = require('@xen-orchestra/async-map')

exports.DurablePartition = class DurablePartition {
  // private resource API is used exceptionally to be able to separate resource creation and release
  #partitionDisposers = {}

  flushAll() {
    const partitionDisposers = this.#partitionDisposers
    return asyncMap(Object.keys(partitionDisposers), path => {
      const disposers = partitionDisposers[path]
      delete partitionDisposers[path]
      return asyncMap(disposers, d => d(path).catch(noop => {}))
    })
  }

  async mount(adapter, diskId, partitionId) {
    const { value: path, dispose } = await adapter.getPartition(diskId, partitionId)

    const partitionDisposers = this.#partitionDisposers
    if (partitionDisposers[path] === undefined) {
      partitionDisposers[path] = []
    }
    partitionDisposers[path].push(dispose)

    return path
  }

  async unmount(path) {
    const partitionDisposers = this.#partitionDisposers
    const disposers = partitionDisposers[path]
    if (disposers === undefined) {
      throw new Error(`No partition corresponding to the path ${path} found`)
    }

    await disposers.pop()()
    if (disposers.length === 0) {
      delete partitionDisposers[path]
    }
  }
}

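A hedged sketch of the intended lifecycle: `mount` keeps the disposer returned by the adapter so the mount survives the current request, and `unmount` or `flushAll` release it later. The `adapter`, `diskId` and `partitionId` values are assumed to come from a `RemoteAdapter` as defined later in this diff, and the require path is illustrative:

```js
const { DurablePartition } = require('@xen-orchestra/backups/DurablePartition') // assumed path

const durablePartition = new DurablePartition()

// mounts the partition and returns the mount path; the disposer is retained
const path = await durablePartition.mount(adapter, diskId, partitionId)
// … serve files from `path` across multiple requests …

await durablePartition.unmount(path) // release one mount of this path
await durablePartition.flushAll() // or release everything, ignoring errors
```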
@@ -1,66 +0,0 @@
const assert = require('assert')

const { formatFilenameDate } = require('./_filenameDate.js')
const { importDeltaVm } = require('./_deltaVm.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

exports.ImportVmBackup = class ImportVmBackup {
  constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses } = {} }) {
    this._adapter = adapter
    this._importDeltaVmSettings = { newMacAddresses }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
  }

  async run() {
    const adapter = this._adapter
    const metadata = this._metadata
    const isFull = metadata.mode === 'full'

    const sizeContainer = { size: 0 }

    let backup
    if (isFull) {
      backup = await adapter.readFullVmBackup(metadata)
      watchStreamSize(backup, sizeContainer)
    } else {
      assert.strictEqual(metadata.mode, 'delta')

      backup = await adapter.readDeltaVmBackup(metadata)
      Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
    }

    return Task.run(
      {
        name: 'transfer',
      },
      async () => {
        const xapi = this._xapi
        const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)

        const vmRef = isFull
          ? await xapi.VM_import(backup, srRef)
          : await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
              ...this._importDeltaVmSettings,
              detectBase: false,
            })

        await Promise.all([
          xapi.call('VM.add_tags', vmRef, 'restored from backup'),
          xapi.call(
            'VM.set_name_label',
            vmRef,
            `${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
          ),
        ])

        return {
          size: sizeContainer.size,
          id: await xapi.getField('VM', vmRef, 'uuid'),
        }
      }
    )
  }
}

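A hedged sketch of driving a restore with this class; `adapter`, `metadata`, `srUuid` and `xapi` are assumed to be obtained elsewhere (remote adapter, backup listing, destination SR, connected XAPI client), and the require path is illustrative:

```js
const { ImportVmBackup } = require('@xen-orchestra/backups/ImportVmBackup') // assumed path

const importer = new ImportVmBackup({
  adapter, // RemoteAdapter for the backup repository
  metadata, // metadata of the backup to restore
  srUuid, // destination storage repository
  xapi, // connected XAPI client
  settings: { newMacAddresses: true },
})

const { id, size } = await importer.run()
console.log(`restored VM ${id} (${size} bytes transferred)`)
```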
@@ -1,28 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/backups

[](https://npmjs.org/package/@xen-orchestra/backups) [](https://bundlephobia.com/result?p=@xen-orchestra/backups) [](https://npmjs.org/package/@xen-orchestra/backups)

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups):

```
> npm install --save @xen-orchestra/backups
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

@@ -1,559 +0,0 @@
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const fromCallback = require('promise-toolbox/fromCallback.js')
|
||||
const fromEvent = require('promise-toolbox/fromEvent.js')
|
||||
const pDefer = require('promise-toolbox/defer.js')
|
||||
const pump = require('pump')
|
||||
const { basename, dirname, join, normalize, resolve } = require('path')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
|
||||
const { deduped } = require('@vates/disposable/deduped.js')
|
||||
const { execFile } = require('child_process')
|
||||
const { readdir, stat } = require('fs-extra')
|
||||
const { ZipFile } = require('yazl')
|
||||
|
||||
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
|
||||
const { cleanVm } = require('./_cleanVm.js')
|
||||
const { getTmpDir } = require('./_getTmpDir.js')
|
||||
const { isMetadataFile, isVhdFile } = require('./_backupType.js')
|
||||
const { isValidXva } = require('./_isValidXva.js')
|
||||
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
|
||||
const { lvs, pvs } = require('./_lvm.js')
|
||||
|
||||
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
||||
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
|
||||
|
||||
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
||||
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
|
||||
|
||||
const { warn } = createLogger('xo:backups:RemoteAdapter')
|
||||
|
||||
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
|
||||
|
||||
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
|
||||
|
||||
const RE_VHDI = /^vhdi(\d+)$/
|
||||
|
||||
async function addDirectory(files, realPath, metadataPath) {
|
||||
try {
|
||||
const subFiles = await readdir(realPath)
|
||||
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOTDIR') {
|
||||
throw error
|
||||
}
|
||||
files.push({
|
||||
realPath,
|
||||
metadataPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const createSafeReaddir = (handler, methodName) => (path, options) =>
|
||||
handler.list(path, options).catch(error => {
|
||||
if (error?.code !== 'ENOENT') {
|
||||
warn(`${methodName} ${path}`, { error })
|
||||
}
|
||||
return []
|
||||
})
|
||||
|
||||
const debounceResourceFactory = factory =>
|
||||
function () {
|
||||
return this._debounceResource(factory.apply(this, arguments))
|
||||
}
|
||||
|
||||
class RemoteAdapter {
|
||||
constructor(handler, { debounceResource = res => res, dirMode } = {}) {
|
||||
this._debounceResource = debounceResource
|
||||
this._dirMode = dirMode
|
||||
this._handler = handler
|
||||
}
|
||||
|
||||
get handler() {
|
||||
return this._handler
|
||||
}
|
||||
|
||||
async _deleteVhd(path) {
|
||||
const handler = this._handler
|
||||
const vhds = await asyncMapSettled(
|
||||
await handler.list(dirname(path), {
|
||||
filter: isVhdFile,
|
||||
prependDir: true,
|
||||
}),
|
||||
async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
return {
|
||||
footer: vhd.footer,
|
||||
header: vhd.header,
|
||||
path,
|
||||
}
|
||||
} catch (error) {
|
||||
// Do not fail on corrupted VHDs (usually uncleaned temporary files),
|
||||
// they are probably inconsequent to the backup process and should not
|
||||
// fail it.
|
||||
warn(`BackupNg#_deleteVhd ${path}`, { error })
|
||||
}
|
||||
}
|
||||
)
|
||||
const base = basename(path)
|
||||
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
|
||||
if (child === undefined) {
|
||||
await handler.unlink(path)
|
||||
return 0
|
||||
}
|
||||
|
||||
try {
|
||||
const childPath = child.path
|
||||
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
|
||||
await handler.rename(path, childPath)
|
||||
return mergedDataSize
|
||||
} catch (error) {
|
||||
handler.unlink(path).catch(warn)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async _findPartition(devicePath, partitionId) {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
const partition = partitions.find(_ => _.id === partitionId)
|
||||
if (partition === undefined) {
|
||||
throw new Error(`partition ${partitionId} not found`)
|
||||
}
|
||||
return partition
|
||||
}
|
||||
|
||||
_getLvmLogicalVolumes = Disposable.factory(this._getLvmLogicalVolumes)
|
||||
_getLvmLogicalVolumes = deduped(this._getLvmLogicalVolumes, (devicePath, pvId, vgName) => [devicePath, pvId, vgName])
|
||||
_getLvmLogicalVolumes = debounceResourceFactory(this._getLvmLogicalVolumes)
|
||||
async *_getLvmLogicalVolumes(devicePath, pvId, vgName) {
|
||||
yield this._getLvmPhysicalVolume(devicePath, pvId && (await this._findPartition(devicePath, pvId)))
|
||||
|
||||
await fromCallback(execFile, 'vgchange', ['-ay', vgName])
|
||||
try {
|
||||
yield lvs(['lv_name', 'lv_path'], vgName)
|
||||
} finally {
|
||||
await fromCallback(execFile, 'vgchange', ['-an', vgName])
|
||||
}
|
||||
}
|
||||
|
||||
_getLvmPhysicalVolume = Disposable.factory(this._getLvmPhysicalVolume)
|
||||
_getLvmPhysicalVolume = deduped(this._getLvmPhysicalVolume, (devicePath, partition) => [devicePath, partition?.id])
|
||||
_getLvmPhysicalVolume = debounceResourceFactory(this._getLvmPhysicalVolume)
|
||||
async *_getLvmPhysicalVolume(devicePath, partition) {
|
||||
const args = []
|
||||
if (partition !== undefined) {
|
||||
args.push('-o', partition.start * 512, '--sizelimit', partition.size)
|
||||
}
|
||||
args.push('--show', '-f', devicePath)
|
||||
const path = (await fromCallback(execFile, 'losetup', args)).trim()
|
||||
try {
|
||||
await fromCallback(execFile, 'pvscan', ['--cache', path])
|
||||
yield path
|
||||
} finally {
|
||||
try {
|
||||
const vgNames = await pvs('vg_name', path)
|
||||
await fromCallback(execFile, 'vgchange', ['-an', ...vgNames])
|
||||
} finally {
|
||||
await fromCallback(execFile, 'losetup', ['-d', path])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_getPartition = Disposable.factory(this._getPartition)
|
||||
_getPartition = deduped(this._getPartition, (devicePath, partition) => [devicePath, partition?.id])
|
||||
_getPartition = debounceResourceFactory(this._getPartition)
|
||||
async *_getPartition(devicePath, partition) {
|
||||
const options = ['loop', 'ro']
|
||||
|
||||
if (partition !== undefined) {
|
||||
const { size, start } = partition
|
||||
options.push(`sizelimit=${size}`)
|
||||
if (start !== undefined) {
|
||||
options.push(`offset=${start * 512}`)
|
||||
}
|
||||
}
|
||||
|
||||
const path = yield getTmpDir()
|
||||
const mount = options => {
|
||||
return fromCallback(execFile, 'mount', [
|
||||
`--options=${options.join(',')}`,
|
||||
`--source=${devicePath}`,
|
||||
`--target=${path}`,
|
||||
])
|
||||
}
|
||||
|
||||
// `norecovery` option is used for ext3/ext4/xfs, if it fails it might be
|
||||
// another fs, try without
|
||||
try {
|
||||
await mount([...options, 'norecovery'])
|
||||
} catch (error) {
|
||||
await mount(options)
|
||||
}
|
||||
try {
|
||||
yield path
|
||||
} finally {
|
||||
await fromCallback(execFile, 'umount', ['--lazy', path])
|
||||
}
|
||||
}
|
||||
|
||||
_listLvmLogicalVolumes(devicePath, partition, results = []) {
|
||||
return Disposable.use(this._getLvmPhysicalVolume(devicePath, partition), async path => {
|
||||
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], path)
|
||||
const partitionId = partition !== undefined ? partition.id : ''
|
||||
lvs.forEach((lv, i) => {
|
||||
const name = lv.lv_name
|
||||
if (name !== '') {
|
||||
results.push({
|
||||
id: `${partitionId}/${lv.vg_name}/${name}`,
|
||||
name,
|
||||
size: lv.lv_size,
|
||||
})
|
||||
}
|
||||
})
|
||||
return results
|
||||
})
|
||||
}
|
||||
|
||||
_usePartitionFiles = Disposable.factory(this._usePartitionFiles)
|
||||
async *_usePartitionFiles(diskId, partitionId, paths) {
|
||||
const path = yield this.getPartition(diskId, partitionId)
|
||||
|
||||
const files = []
|
||||
await asyncMap(paths, file =>
|
||||
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
|
||||
)
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
fetchPartitionFiles(diskId, partitionId, paths) {
|
||||
const { promise, reject, resolve } = pDefer()
|
||||
Disposable.use(
|
||||
async function* () {
|
||||
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
|
||||
const zip = new ZipFile()
|
||||
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
|
||||
zip.end()
|
||||
const { outputStream } = zip
|
||||
resolve(outputStream)
|
||||
await fromEvent(outputStream, 'end')
|
||||
}.bind(this)
|
||||
).catch(error => {
|
||||
warn(error)
|
||||
reject(error)
|
||||
})
|
||||
return promise
|
||||
}
|
||||
|
||||
async deleteDeltaVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
let mergedDataSize = 0
|
||||
await asyncMapSettled(backups, ({ _filename, vhds }) =>
|
||||
Promise.all([
|
||||
handler.unlink(_filename),
|
||||
asyncMap(Object.values(vhds), async _ => {
|
||||
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
|
||||
}),
|
||||
])
|
||||
)
|
||||
return mergedDataSize
|
||||
}
|
||||
|
||||
async deleteMetadataBackup(backupId) {
|
||||
const uuidReg = '\\w{8}(-\\w{4}){3}-\\w{12}'
|
||||
const metadataDirReg = 'xo-(config|pool-metadata)-backups'
|
||||
const timestampReg = '\\d{8}T\\d{6}Z'
|
||||
const regexp = new RegExp(`^${metadataDirReg}/${uuidReg}(/${uuidReg})?/${timestampReg}`)
|
||||
if (!regexp.test(backupId)) {
|
||||
throw new Error(`The id (${backupId}) not correspond to a metadata folder`)
|
||||
}
|
||||
|
||||
await this._handler.rmtree(backupId)
|
||||
}
|
||||
|
||||
async deleteOldMetadataBackups(dir, retention) {
|
||||
const handler = this.handler
|
||||
let list = await handler.list(dir)
|
||||
list.sort()
|
||||
list = list.filter(timestamp => /^\d{8}T\d{6}Z$/.test(timestamp)).slice(0, -retention)
|
||||
await asyncMapSettled(list, timestamp => handler.rmtree(`${dir}/${timestamp}`))
|
||||
}
|
||||
|
||||
async deleteFullVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
await asyncMapSettled(backups, ({ _filename, xva }) =>
|
||||
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
|
||||
)
|
||||
}
|
||||
|
||||
  async deleteVmBackup(filename) {
    const metadata = JSON.parse(String(await this._handler.readFile(filename)))
    metadata._filename = filename

    if (metadata.mode === 'delta') {
      await this.deleteDeltaVmBackups([metadata])
    } else if (metadata.mode === 'full') {
      await this.deleteFullVmBackups([metadata])
    } else {
      throw new Error(`no deleter for backup mode ${metadata.mode}`)
    }
  }

  getDisk = Disposable.factory(this.getDisk)
  getDisk = deduped(this.getDisk, diskId => [diskId])
  getDisk = debounceResourceFactory(this.getDisk)
  async *getDisk(diskId) {
    const handler = this._handler

    const diskPath = handler._getFilePath('/' + diskId)
    const mountDir = yield getTmpDir()
    await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
    try {
      let max = 0
      let maxEntry
      const entries = await readdir(mountDir)
      entries.forEach(entry => {
        const matches = RE_VHDI.exec(entry)
        if (matches !== null) {
          const value = +matches[1]
          if (value > max) {
            max = value
            maxEntry = entry
          }
        }
      })
      if (max === 0) {
        throw new Error('no disks found')
      }

      yield `${mountDir}/${maxEntry}`
    } finally {
      await fromCallback(execFile, 'fusermount', ['-uz', mountDir])
    }
  }

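  // Illustrative only: getDisk() is a disposable factory, so callers must go
  // through Disposable.use() to guarantee the FUSE mount is cleaned up, as
  // listPartitions() below does. A hypothetical caller:
  //
  //   await Disposable.use(adapter.getDisk(diskId), async devicePath => {
  //     // devicePath points at the raw image exposed by vhdimount
  //   })
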
  // partitionId values:
  //
  // - undefined: raw disk
  // - `<partitionId>`: partitioned disk
  // - `<pvId>/<vgName>/<lvName>`: LVM on a partitioned disk
  // - `/<vgName>/<lvName>`: LVM on a raw disk
  getPartition = Disposable.factory(this.getPartition)
  async *getPartition(diskId, partitionId) {
    const devicePath = yield this.getDisk(diskId)
    if (partitionId === undefined) {
      return yield this._getPartition(devicePath)
    }

    const isLvmPartition = partitionId.includes('/')
    if (isLvmPartition) {
      const [pvId, vgName, lvName] = partitionId.split('/')
      const lvs = yield this._getLvmLogicalVolumes(devicePath, pvId !== '' ? pvId : undefined, vgName)
      return yield this._getPartition(lvs.find(_ => _.lv_name === lvName).lv_path)
    }

    return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
  }

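  // Illustrative only: how the LVM forms of partitionId decompose with split('/').
  // The names below are hypothetical.
  //
  //   'pv0/vg_system/lv_root'.split('/')  // => ['pv0', 'vg_system', 'lv_root']
  //   '/vg_system/lv_root'.split('/')     // => ['', 'vg_system', 'lv_root']
  //                                       //    (empty pvId = raw-disk LVM, passed as undefined)
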
  async listAllVmBackups() {
    const handler = this._handler

    const backups = { __proto__: null }
    await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
      const vmBackups = await this.listVmBackups(vmUuid)
      backups[vmUuid] = vmBackups
    })

    return backups
  }

  listPartitionFiles(diskId, partitionId, path) {
    return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
      path = resolveSubpath(rootPath, path)

      const entriesMap = {}
      await asyncMap(await readdir(path), async name => {
        try {
          const stats = await stat(`${path}/${name}`)
          entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
        } catch (error) {
          if (error == null || error.code !== 'ENOENT') {
            throw error
          }
        }
      })

      return entriesMap
    })
  }

  listPartitions(diskId) {
    return Disposable.use(this.getDisk(diskId), async devicePath => {
      const partitions = await listPartitions(devicePath)

      if (partitions.length === 0) {
        try {
          // handle potential raw LVM physical volume
          return await this._listLvmLogicalVolumes(devicePath, undefined, partitions)
        } catch (error) {
          return []
        }
      }

      const results = []
      await asyncMapSettled(partitions, partition =>
        partition.type === LVM_PARTITION_TYPE
          ? this._listLvmLogicalVolumes(devicePath, partition, results)
          : results.push(partition)
      )
      return results
    })
  }

  async listPoolMetadataBackups() {
    const handler = this._handler
    const safeReaddir = createSafeReaddir(handler, 'listPoolMetadataBackups')

    const backupsByPool = {}
    await asyncMap(await safeReaddir(DIR_XO_POOL_METADATA_BACKUPS, { prependDir: true }), async scheduleDir =>
      asyncMap(await safeReaddir(scheduleDir), async poolId => {
        const backups = backupsByPool[poolId] ?? (backupsByPool[poolId] = [])
        return asyncMap(await safeReaddir(`${scheduleDir}/${poolId}`, { prependDir: true }), async backupDir => {
          try {
            backups.push({
              id: backupDir,
              ...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
            })
          } catch (error) {
            warn(`listPoolMetadataBackups ${backupDir}`, {
              error,
            })
          }
        })
      })
    )

    // delete empty entries and sort backups
    Object.keys(backupsByPool).forEach(poolId => {
      const backups = backupsByPool[poolId]
      if (backups.length === 0) {
        delete backupsByPool[poolId]
      } else {
        backups.sort(compareTimestamp)
      }
    })

    return backupsByPool
  }

  async listVmBackups(vmUuid, predicate) {
    const handler = this._handler
    const backups = []

    try {
      const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
        filter: isMetadataFile,
        prependDir: true,
      })
      await asyncMap(files, async file => {
        try {
          const metadata = await this.readVmBackupMetadata(file)
          if (predicate === undefined || predicate(metadata)) {
            // inject an id usable by importVmBackupNg()
            metadata.id = metadata._filename

            backups.push(metadata)
          }
        } catch (error) {
          warn(`listVmBackups ${file}`, { error })
        }
      })
    } catch (error) {
      let code
      if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
        throw error
      }
    }

    return backups.sort(compareTimestamp)
  }

  async listXoMetadataBackups() {
    const handler = this._handler
    const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')

    const backups = []
    await asyncMap(await safeReaddir(DIR_XO_CONFIG_BACKUPS, { prependDir: true }), async scheduleDir =>
      asyncMap(await safeReaddir(scheduleDir, { prependDir: true }), async backupDir => {
        try {
          backups.push({
            id: backupDir,
            ...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
          })
        } catch (error) {
          warn(`listXoMetadataBackups ${backupDir}`, { error })
        }
      })
    )

    return backups.sort(compareTimestamp)
  }

  async outputStream(path, input, { checksum = true, validator = noop } = {}) {
    await this._handler.outputStream(path, input, {
      checksum,
      dirMode: this._dirMode,
      async validator() {
        await input.task
        return validator.apply(this, arguments)
      },
    })
  }

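  // Illustrative only: a hypothetical caller passing its own validator. The
  // wrapper above first awaits input.task (assumed here to track the source
  // transfer), then delegates, so the file is only kept if both the transfer
  // and the custom check succeed.
  //
  //   await adapter.outputStream(dataPath, stream, {
  //     validator: () => assertVhdIsValid(dataPath), // assertVhdIsValid is made up
  //   })
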
  async readDeltaVmBackup(metadata) {
    const handler = this._handler
    const { vbds, vdis, vhds, vifs, vm } = metadata
    const dir = dirname(metadata._filename)

    const streams = {}
    await asyncMapSettled(Object.keys(vdis), async id => {
      streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
    })

    return {
      streams,
      vbds,
      vdis,
      version: '1.0.0',
      vifs,
      vm,
    }
  }

  readFullVmBackup(metadata) {
    return this._handler.createReadStream(resolve('/', dirname(metadata._filename), metadata.xva))
  }

  async readVmBackupMetadata(path) {
    return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
  }
}

Object.assign(RemoteAdapter.prototype, {
  cleanVm(vmDir, { lock = true } = {}) {
    if (lock) {
      return Disposable.use(this._handler.lock(vmDir), () => cleanVm.apply(this, arguments))
    } else {
      return cleanVm.apply(this, arguments)
    }
  },
  isValidXva,
})

exports.RemoteAdapter = RemoteAdapter
@@ -1,24 +0,0 @@
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')

exports.RestoreMetadataBackup = class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      return String(await handler.readFile(`${backupId}/data.json`))
    }
  }
}
@@ -1,151 +0,0 @@
const CancelToken = require('promise-toolbox/CancelToken.js')
const Zone = require('node-zone')

const logAfterEnd = () => {
  throw new Error('task has already ended')
}

const noop = Function.prototype

// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
const serializeError = error =>
  error instanceof Error
    ? {
        ...error, // Copy enumerable properties.
        code: error.code,
        message: error.message,
        name: error.name,
        stack: error.stack,
      }
    : error

const $$task = Symbol('@xen-orchestra/backups/Task')

class Task {
  static get cancelToken() {
    const task = Zone.current.data[$$task]
    return task !== undefined ? task.#cancelToken : CancelToken.none
  }

  static run(opts, fn) {
    return new this(opts).run(fn, true)
  }

  static wrapFn(opts, fn) {
    // compatibility with @decorateWith
    if (typeof fn !== 'function') {
      ;[fn, opts] = [opts, fn]
    }

    return function () {
      return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
    }
  }

  #cancelToken
  #id = Math.random().toString(36).slice(2)
  #onLog
  #zone

  constructor({ name, data, onLog }) {
    let parentCancelToken, parentId
    if (onLog === undefined) {
      const parent = Zone.current.data[$$task]
      if (parent === undefined) {
        onLog = noop
      } else {
        onLog = log => parent.#onLog(log)
        parentCancelToken = parent.#cancelToken
        parentId = parent.#id
      }
    }

    const zone = Zone.current.fork('@xen-orchestra/backups/Task')
    zone.data[$$task] = this
    this.#zone = zone

    const { cancel, token } = CancelToken.source(parentCancelToken && [parentCancelToken])
    this.#cancelToken = token
    this.cancel = cancel

    this.#onLog = onLog

    this.#log('start', {
      data,
      message: name,
      parentId,
    })
  }

  failure(error) {
    this.#end('failure', serializeError(error))
  }

  info(message, data) {
    this.#log('info', { data, message })
  }

  /**
   * Run a function in the context of this task
   *
   * In case of error, the task will be failed.
   *
   * @typedef Result
   * @param {() => Result} fn
   * @param {boolean} last - Whether the task should succeed if there is no error
   * @returns Result
   */
  run(fn, last = false) {
    return this.#zone.run(() => {
      try {
        const result = fn()
        let then
        if (result != null && typeof (then = result.then) === 'function') {
          then.call(result, last && (value => this.success(value)), error => this.failure(error))
        } else if (last) {
          this.success(result)
        }
        return result
      } catch (error) {
        this.failure(error)
        throw error
      }
    })
  }

  success(value) {
    this.#end('success', value)
  }

  warning(message, data) {
    this.#log('warning', { data, message })
  }

  wrapFn(fn, last) {
    const task = this
    return function () {
      return task.run(() => fn.apply(this, arguments), last)
    }
  }

  #end(status, result) {
    this.#log('end', { result, status })
    this.#onLog = logAfterEnd
  }

  #log(event, props) {
    this.#onLog({
      ...props,
      event,
      taskId: this.#id,
      timestamp: Date.now(),
    })
  }
}
exports.Task = Task

for (const method of ['info', 'warning']) {
  Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
}
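
// Illustrative only: how a caller might drive Task directly. Sub-tasks created
// inside fn inherit the zone and report to the same onLog; the names below are
// made up.
//
//   const { Task } = require('./Task.js')
//
//   await Task.run(
//     { name: 'example job', onLog: log => console.log(log.event, log.message ?? '') },
//     async () => {
//       Task.info('step 1 done')                       // logged with the current task id
//       await Task.run({ name: 'sub-task' }, () => 42) // nested, logged with a parentId
//     }
//   )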
@@ -1,75 +0,0 @@
const { asyncMap } = require('@xen-orchestra/async-map')

const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { Task } = require('./Task.js')

const PATH_DB_DUMP = '/pool/xmldbdump'
exports.PATH_DB_DUMP = PATH_DB_DUMP

exports.PoolMetadataBackup = class PoolMetadataBackup {
  constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
    this._pool = pool
    this._remoteAdapters = remoteAdapters
    this._schedule = schedule
    this._settings = settings
  }

  _exportPoolMetadata() {
    const xapi = this._pool.$xapi
    return xapi.getResource(PATH_DB_DUMP, {
      task: xapi.task_create('Export pool metadata'),
    })
  }

  async run() {
    const timestamp = Date.now()

    const { _job: job, _schedule: schedule, _pool: pool } = this
    const poolDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${schedule.id}/${pool.$id}`
    const dir = `${poolDir}/${formatFilenameDate(timestamp)}`

    const stream = await this._exportPoolMetadata()
    const fileName = `${dir}/data`

    const metadata = JSON.stringify(
      {
        jobId: job.id,
        jobName: job.name,
        pool,
        poolMaster: pool.$master,
        scheduleId: schedule.id,
        scheduleName: schedule.name,
        timestamp,
      },
      null,
      2
    )
    const metaDataFileName = `${dir}/metadata.json`

    await asyncMap(
      Object.entries(this._remoteAdapters),
      ([remoteId, adapter]) =>
        Task.run(
          {
            name: `Starting metadata backup for the pool (${pool.$id}) for the remote (${remoteId}). (${job.id})`,
            data: {
              id: remoteId,
              type: 'remote',
            },
          },
          async () => {
            // forkStreamUnpipe must be called synchronously: do not await anything
            // between obtaining the stream and forking it
            await adapter.outputStream(fileName, forkStreamUnpipe(stream), { checksum: false })
            await adapter.handler.outputFile(metaDataFileName, metadata, {
              dirMode: this._config.dirMode,
            })
            await adapter.deleteOldMetadataBackups(poolDir, this._settings.retentionPoolMetadata)
          }
        ).catch(() => {}) // errors are handled by logs
    )
  }
}
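
// Illustrative only: the on-remote layout produced by run() above, matching the
// id format checked by RemoteAdapter#deleteMetadataBackup(). Ids are made up.
//
//   xo-pool-metadata-backups/
//     <schedule uuid>/
//       <pool uuid>/
//         20210510T123000Z/
//           data           <- pool database dump
//           metadata.json  <- job/schedule/pool descriptor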
@@ -1,382 +0,0 @@
const assert = require('assert')
const findLast = require('lodash/findLast.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
const { exportDeltaVm } = require('./_deltaVm.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
const { getOldEntries } = require('./_getOldEntries.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

const { debug, warn } = createLogger('xo:backups:VmBackup')

const asyncEach = async (iterable, fn, thisArg = iterable) => {
  for (const item of iterable) {
    await fn.call(thisArg, item)
  }
}

const forkDeltaExport = deltaExport =>
  Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })

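// Illustrative only: why forkDeltaExport uses Object.create. The fork shares all
// metadata (vm, vdis, vbds, etc.) with the original via the prototype chain and
// only owns a fresh `streams` map, so each writer gets its own copy of the streams:
//
//   const fork = forkDeltaExport(deltaExport)
//   fork.vm === deltaExport.vm           // true: inherited, not copied
//   fork.streams === deltaExport.streams // false: independently consumable forks
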
exports.VmBackup = class VmBackup {
  constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.remotes = remotes
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm

    // VM (snapshot) that is really exported
    this.exportedVm = undefined

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isDelta = job.mode === 'delta'
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._isDelta
        ? [DeltaBackupWriter, DeltaReplicationWriter]
        : [FullBackupWriter, FullReplicationWriter]

      const allSettings = job.settings

      Object.keys(remoteAdapters).forEach(remoteId => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
        }
      })
    }
  }

  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, warnMessage, parallel = true) {
    const writers = this._writers
    if (writers.size === 0) {
      return
    }
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await fn(writer)
      } catch (error) {
        this.delete(writer)
        warn(warnMessage, { error, writer: writer.constructor.name })
      }
    })
    if (writers.size === 0) {
      throw new Error('all targets have failed, step: ' + warnMessage)
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const { vm } = this
    const xapi = this._xapi

    const settings = this._settings

    const doSnapshot =
      this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
    if (doSnapshot) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          name_label: this._getSnapshotNameLabel(vm),
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _copyDelta() {
    const { exportedVm } = this
    const baseVm = this._baseVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          sizeContainers,
          timestamp,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }

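  // Illustrative only: the speed reported by the debug log above (and in
  // _copyFull below) is in MiB/s. size is in bytes and duration in ms, so
  // (size * 1e3) / duration is bytes/s and the two /1024 convert to MiB/s.
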
  async _copyFull() {
    const { compression } = this.job
    const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
      compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
      useSnapshot: false,
    })
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    // TODO: handle all schedules (no longer existing schedules default to 0 retention)

    const { scheduleId } = this
    const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)

    const baseVmRef = this._baseVm?.$ref
    const xapi = this._xapi
    await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
      if ($ref !== baseVmRef) {
        return xapi.VM_destroy($ref)
      }
    })
  }

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
      }
    })

    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    await this._callWriters(
      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
      'writer.checkBaseVdis()',
      false
    )

    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (!presentBaseVdis.has(baseUuid)) {
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }

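  // Illustrative only: the fullInterval check above in numbers. With
  // fullInterval = 3, a base whose deltaChainLength is already 2 would produce
  // a chain of length 3; 3 > 3 is false, so the method bails out without
  // selecting a base, which forces the next backup to be a full one.
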
  run = defer(this.run)
  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(() => writer.afterBackup())
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    if (this._isDelta) {
      await this._selectBaseVm()
    }

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { vm } = this
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await (this._isDelta ? this._copyDelta() : this._copyFull())
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
  }
}
@@ -1,62 +0,0 @@
const { asyncMap } = require('@xen-orchestra/async-map')

const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { Task } = require('./Task.js')

exports.XoMetadataBackup = class XoMetadataBackup {
  constructor({ config, job, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
    this._remoteAdapters = remoteAdapters
    this._schedule = schedule
    this._settings = settings
  }

  async run() {
    const timestamp = Date.now()

    const { _job: job, _schedule: schedule } = this
    const scheduleDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
    const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`

    const data = job.xoMetadata
    const fileName = `${dir}/data.json`

    const metadata = JSON.stringify(
      {
        jobId: job.id,
        jobName: job.name,
        scheduleId: schedule.id,
        scheduleName: schedule.name,
        timestamp,
      },
      null,
      2
    )
    const metaDataFileName = `${dir}/metadata.json`

    await asyncMap(
      Object.entries(this._remoteAdapters),
      ([remoteId, adapter]) =>
        Task.run(
          {
            name: `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
            data: {
              id: remoteId,
              type: 'remote',
            },
          },
          async () => {
            const handler = adapter.handler
            const dirMode = this._config.dirMode
            await handler.outputFile(fileName, data, { dirMode })
            await handler.outputFile(metaDataFileName, metadata, {
              dirMode,
            })
            await adapter.deleteOldMetadataBackups(scheduleDir, this._settings.retentionXoMetadata)
          }
        ).catch(() => {}) // errors are handled by logs
    )
  }
}
@@ -1,4 +0,0 @@
exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
// the '.cheksum' spelling is kept as-is: it must match the file names already
// present on existing remotes
exports.isXvaSumFile = filename => filename.endsWith('.xva.cheksum')
@@ -1,155 +0,0 @@
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
  require('@xen-orchestra/log').createLogger('xo:backups:worker')
)

const Disposable = require('promise-toolbox/Disposable.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
const { Xapi } = require('@xen-orchestra/xapi')

const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')

class BackupWorker {
  #config
  #job
  #recordToXapi
  #remoteOptions
  #remotes
  #schedule
  #xapiOptions
  #xapis

  constructor({ config, job, recordToXapi, remoteOptions, remotes, resourceCacheDelay, schedule, xapiOptions, xapis }) {
    this.#config = config
    this.#job = job
    this.#recordToXapi = recordToXapi
    this.#remoteOptions = remoteOptions
    this.#remotes = remotes
    this.#schedule = schedule
    this.#xapiOptions = xapiOptions
    this.#xapis = xapis

    const debounceResource = createDebounceResource()
    debounceResource.defaultDelay = parseDuration(resourceCacheDelay)
    this.debounceResource = debounceResource
  }

  run() {
    return new Backup({
      config: this.#config,
      getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
      getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
        const xapiId = this.#recordToXapi[uuid]
        if (xapiId === undefined) {
          throw new Error('no XAPI associated to ' + uuid)
        }

        const xapi = yield this.getXapi(this.#xapis[xapiId])
        return xapi.getRecordByUuid(type, uuid)
      }).bind(this),
      job: this.#job,
      schedule: this.#schedule,
    }).run()
  }

  getAdapter = Disposable.factory(this.getAdapter)
  getAdapter = deduped(this.getAdapter, remote => [remote.url])
  getAdapter = compose(this.getAdapter, function (resource) {
    return this.debounceResource(resource)
  })
  async *getAdapter(remote) {
    const handler = getHandler(remote, this.#remoteOptions)
    await handler.sync()
    try {
      yield new RemoteAdapter(handler, {
        debounceResource: this.debounceResource,
        dirMode: this.#config.dirMode,
      })
    } finally {
      await handler.forget()
    }
  }

  getXapi = Disposable.factory(this.getXapi)
  getXapi = deduped(this.getXapi, ({ url }) => [url])
  getXapi = compose(this.getXapi, function (resource) {
    return this.debounceResource(resource)
  })
  async *getXapi({ credentials: { username: user, password }, ...opts }) {
    const xapi = new Xapi({
      ...this.#xapiOptions,
      ...opts,
      auth: {
        user,
        password,
      },
    })

    await xapi.connect()
    try {
      await xapi.objectsFetched

      yield xapi
    } finally {
      await xapi.disconnect()
    }
  }
}

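// Illustrative only, and an assumption about the @vates/disposable helpers: the
// decorator stacks above turn each generator into a disposable resource
// (Disposable.factory), share one live resource per key, i.e. per remote/XAPI
// URL (deduped), and keep it alive for a grace period after release
// (debounceResource), so repeated getXapi() calls during one backup are
// expected to reuse a single connection.
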
// Received message:
//
// Message {
//   action: 'run'
//   data: object
//   runWithLogs: boolean
// }
//
// Sent message:
//
// Message {
//   type: 'log' | 'result'
//   data?: object
//   status?: 'success' | 'failure'
//   result?: any
// }
process.on('message', async message => {
  if (message.action === 'run') {
    const backupWorker = new BackupWorker(message.data)
    try {
      const result = message.runWithLogs
        ? await Task.run(
            {
              name: 'backup run',
              onLog: data =>
                process.send({
                  data,
                  type: 'log',
                }),
            },
            () => backupWorker.run()
          )
        : await backupWorker.run()

      process.send({
        type: 'result',
        result,
        status: 'success',
      })
    } catch (error) {
      process.send({
        type: 'result',
        result: error,
        status: 'failure',
      })
    } finally {
      await ignoreErrors.call(backupWorker.debounceResource.flushAll())
      process.disconnect()
    }
  }
})
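
// Illustrative only: a minimal parent-side sketch of the protocol documented
// above, assuming this file is forked as a child process. The worker path and
// payload fields are placeholders.
//
//   const { fork } = require('child_process')
//
//   const worker = fork(require.resolve('./_backupWorker.js'))
//   worker.on('message', message => {
//     if (message.type === 'log') console.log(message.data)
//     else if (message.type === 'result') worker.disconnect()
//   })
//   worker.send({ action: 'run', data: {/* BackupWorker options */}, runWithLogs: true })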