Compare commits


13 Commits

Author SHA1 Message Date
Florent BEAUCHAMP 862d9a6a7f feat: use additionnal file for checksum instead of attributes 2023-07-24 18:08:19 +02:00
Florent BEAUCHAMP 06cabcfb21 use chunk filters to store dedup 2023-07-24 15:20:01 +02:00
Florent BEAUCHAMP 50f378ec1e fixup! feat(fs): use multiplatform module instead of call to local binary 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 506a6aad08 feat(backup): show dedup status in restore popup + cleanup and tests 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 447112b583 feat(fs): use multiplatform module instead of call to local binary 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP b380e085d2 feat(backups): store dedup information in filepath 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP d752b1ed70 tests and docs 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 16f4fcfd04 refacto and tests 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 69a0e0e563 fixes following review 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 456e4f213b feat: parser 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP a6d24a6dfa test and fixes 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 391c778515 fix(cleanVm): handle broken-er alias 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP 4e125ede88 feat(@xen-orchestra/fs): implement deduplication for vhd directory 2023-07-20 10:12:02 +02:00
855 changed files with 17495 additions and 36842 deletions


@@ -15,10 +15,9 @@ module.exports = {
overrides: [
{
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js', '**/scripts/**.{,c,m}js'],
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
rules: {
'n/no-process-exit': 'off',
'n/shebang': 'off',
'no-console': 'off',
},
},
@@ -47,58 +46,6 @@ module.exports = {
],
},
},
{
files: ['@xen-orchestra/{web-core,lite,web}/**/*.{vue,ts}'],
parserOptions: {
sourceType: 'module',
},
plugins: ['import'],
extends: [
'plugin:import/recommended',
'plugin:import/typescript',
'plugin:vue/vue3-recommended',
'@vue/eslint-config-typescript/recommended',
'@vue/eslint-config-prettier',
],
settings: {
'import/resolver': {
typescript: true,
'eslint-import-resolver-custom-alias': {
alias: {
'@core': '../web-core/lib',
'@': './src',
},
extensions: ['.ts'],
packages: ['@xen-orchestra/lite', '@xen-orchestra/web'],
},
},
},
rules: {
'no-void': 'off',
'n/no-missing-import': 'off', // using 'import' plugin instead to support TS aliases
'@typescript-eslint/no-explicit-any': 'off',
'vue/require-default-prop': 'off', // https://github.com/vuejs/eslint-plugin-vue/issues/2051
},
},
{
files: ['@xen-orchestra/{web-core,lite,web}/src/pages/**/*.vue'],
parserOptions: {
sourceType: 'module',
},
rules: {
'vue/multi-word-component-names': 'off',
},
},
{
files: ['@xen-orchestra/{web-core,lite,web}/typed-router.d.ts'],
parserOptions: {
sourceType: 'module',
},
rules: {
'eslint-comments/disable-enable-pair': 'off',
'eslint-comments/no-unlimited-disable': 'off',
},
},
],
parserOptions: {
@@ -121,11 +68,6 @@ module.exports = {
'no-console': ['error', { allow: ['warn', 'error'] }],
// this rule can prevent race condition bugs like parallel `a += await foo()`
//
// as it has a lots of false positive, it is only enabled as a warning for now
'require-atomic-updates': 'warn',
strict: 'error',
},
}

.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,48 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
1. ⚠️ **If you don't follow this template, the issue will be closed**.
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
Are you using XOA or XO from the sources?
If XOA:
- which release channel? (`stable` vs `latest`)
- please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
If XO from the sources:
- Provide **your commit number**. If it's older than a week, we won't investigate
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please provide the following information):**
- Node: [e.g. 16.12.1]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**
Add any other context about the problem here.


@@ -1,119 +0,0 @@
name: Bug Report
description: Create a report to help us improve
labels: ['type: bug :bug:', 'status: triaging :triangular_flag_on_post:']
body:
- type: markdown
attributes:
value: |
1. ⚠️ **If you don't follow this template, the issue will be closed**.
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
- type: markdown
attributes:
value: '## Are you using XOA or XO from the sources?'
- type: dropdown
id: xo-origin
attributes:
label: Are you using XOA or XO from the sources?
options:
- XOA
- XO from the sources
- both
validations:
required: false
- type: markdown
attributes:
value: '### If XOA:'
- type: dropdown
id: xoa-channel
attributes:
label: Which release channel?
description: please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
options:
- stable
- latest
- both
validations:
required: false
- type: markdown
attributes:
value: '### If XO from the sources:'
- type: markdown
attributes:
value: |
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
- type: input
id: xo-sources-commit-number
attributes:
label: Provide your commit number
description: If it's older than a week, we won't investigate
placeholder: e.g. 579f0
validations:
required: false
- type: markdown
attributes:
value: '## Bug description:'
- type: textarea
id: bug-description
attributes:
label: Describe the bug
description: A clear and concise description of what the bug is
validations:
required: true
- type: textarea
id: error-message
attributes:
label: Error message
render: Text
validations:
required: false
- type: textarea
id: steps
attributes:
label: To reproduce
description: 'Steps to reproduce the behavior:'
value: |
1. Go to '...'
2. Click on '...'
3. Scroll down to '...'
4. See error
validations:
required: false
- type: textarea
id: expected-behavior
attributes:
label: Expected behavior
description: A clear and concise description of what you expected to happen
validations:
required: false
- type: textarea
id: screenshots
attributes:
label: Screenshots
description: If applicable, add screenshots to help explain your problem
validations:
required: false
- type: markdown
attributes:
value: '## Environment (please provide the following information):'
- type: input
id: node-version
attributes:
label: Node
placeholder: e.g. 16.12.1
validations:
required: true
- type: input
id: hypervisor-version
attributes:
label: Hypervisor
placeholder: e.g. XCP-ng 8.2.0
validations:
required: true
- type: textarea
id: additional-context
attributes:
label: Additional context
description: Add any other context about the problem here
validations:
required: false


@@ -24,12 +24,8 @@ jobs:
cache: 'yarn'
- name: Install project dependencies
run: yarn
- name: Ensure yarn.lock is up-to-date
run: git diff --exit-code yarn.lock
- name: Build the project
run: yarn build
- name: Unit tests
run: yarn test-unit
- name: Lint tests
run: yarn test-lint
- name: Integration tests

.gitignore

@@ -30,12 +30,8 @@ pnpm-debug.log.*
yarn-error.log
yarn-error.log.*
.env
*.tsbuildinfo
# code coverage
.nyc_output/
coverage/
.turbo/
# https://node-tap.org/dot-tap-folder/
.tap/


@@ -1,11 +1,8 @@
'use strict'
module.exports = {
arrowParens: 'avoid',
jsxSingleQuote: true,
semi: false,
singleQuote: true,
trailingComma: 'es5',
// 2020-11-24: Requested by nraynaud and approved by the rest of the team
//


@@ -33,7 +33,8 @@
"test": "node--test"
},
"devDependencies": {
"sinon": "^17.0.1",
"sinon": "^15.0.1",
"tap": "^16.3.0",
"test": "^3.2.1"
}
}


@@ -62,42 +62,6 @@ decorateClass(Foo, {
})
```
### `decorateObject(object, map)`
Decorates an object the same way `decorateClass()` decorates a class:
```js
import { decorateObject } from '@vates/decorate-with'
const object = {
get bar() {
// body
},
set bar(value) {
// body
},
baz() {
// body
},
}
decorateObject(object, {
// getter and/or setter
bar: {
// without arguments
get: lodash.memoize,
// with arguments
set: [lodash.debounce, 150],
},
// method (with or without arguments)
baz: lodash.curry,
})
```
### `perInstance(fn, ...args)`
Helper to decorate the method by instance instead of for the whole class.
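The hunk above stops at the `perInstance` heading, before that section's own example. As a rough sketch (not taken from this diff) of how it can be combined with the `decorateClass()` map form shown earlier, assuming `lodash.memoize` as the wrapped decorator:

```js
import { decorateClass, perInstance } from '@vates/decorate-with'
import lodash from 'lodash'

class Collection {
  query(filter) {
    // expensive work whose result only depends on `filter` for a given instance
  }
}

// memoize `query` with one cache per Collection instance instead of a single
// cache shared by the whole class (array form = decorator with arguments, as in
// the map examples above; this exact combination is an assumption)
decorateClass(Collection, {
  query: [perInstance, lodash.memoize],
})
```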


@@ -80,42 +80,6 @@ decorateClass(Foo, {
})
```
### `decorateObject(object, map)`
Decorates an object the same way `decorateClass()` decorates a class:
```js
import { decorateObject } from '@vates/decorate-with'
const object = {
get bar() {
// body
},
set bar(value) {
// body
},
baz() {
// body
},
}
decorateObject(object, {
// getter and/or setter
bar: {
// without arguments
get: lodash.memoize,
// with arguments
set: [lodash.debounce, 150],
},
// method (with or without arguments)
baz: lodash.curry,
})
```
### `perInstance(fn, ...args)`
Helper to decorate the method by instance instead of for the whole class.


@@ -14,13 +14,10 @@ function applyDecorator(decorator, value) {
}
exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
return decorateObject(klass.prototype, map)
}
function decorateObject(object, map) {
const { prototype } = klass
for (const name of Object.keys(map)) {
const decorator = map[name]
const descriptor = getOwnPropertyDescriptor(object, name)
const descriptor = getOwnPropertyDescriptor(prototype, name)
if (typeof decorator === 'function' || Array.isArray(decorator)) {
descriptor.value = applyDecorator(decorator, descriptor.value)
} else {
@@ -33,11 +30,10 @@ function decorateObject(object, map) {
}
}
defineProperty(object, name, descriptor)
defineProperty(prototype, name, descriptor)
}
return object
return klass
}
exports.decorateObject = decorateObject
exports.perInstance = function perInstance(fn, decorator, ...args) {
const map = new WeakMap()


@@ -13,15 +13,12 @@ describe('decorateWith', () => {
const expectedFn = Function.prototype
const newFn = () => {}
const decorator = decorateWith(
function wrapper(fn, ...args) {
assert.deepStrictEqual(fn, expectedFn)
assert.deepStrictEqual(args, expectedArgs)
const decorator = decorateWith(function wrapper(fn, ...args) {
assert.deepStrictEqual(fn, expectedFn)
assert.deepStrictEqual(args, expectedArgs)
return newFn
},
...expectedArgs
)
return newFn
}, ...expectedArgs)
const descriptor = {
configurable: true,


@@ -20,7 +20,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "2.1.0",
"version": "2.0.0",
"engines": {
"node": ">=8.10"
},


@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.5",
"version": "0.1.4",
"engines": {
"node": ">=8.10"
},
@@ -23,13 +23,13 @@
"test": "node--test"
},
"dependencies": {
"@vates/multi-key-map": "^0.2.0",
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.6.0",
"ensure-array": "^1.0.0"
},
"devDependencies": {
"sinon": "^17.0.1",
"sinon": "^15.0.1",
"test": "^3.2.1"
}
}


@@ -20,9 +20,6 @@ function assertListeners(t, event, listeners) {
}
t.beforeEach(function (t) {
// work around https://github.com/tapjs/tapjs/issues/998
t.context = {}
t.context.ee = new EventEmitter()
t.context.em = new EventListenersManager(t.context.ee)
})


@@ -38,9 +38,9 @@
"version": "1.0.1",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --allow-incomplete-coverage"
"test": "tap --branches=72"
},
"devDependencies": {
"tap": "^18.7.0"
"tap": "^16.2.0"
}
}


@@ -1,28 +0,0 @@
Mount a vhd generated by xen-orchestra to filesystem
### Library
```js
import { mount } from 'fuse-vhd'
// return a disposable, see promise-toolbox/Disposable
// unmount automatically when disposable is disposed
// in case of differencing VHD, it mounts the full chain
await mount(handler, diskId, mountPoint)
```
### cli
From the install folder :
```
cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
```
After installing the package
```
xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
```
remoteUrl can be found by using cli in `@xen-orchestra/fs` , for example a local remote will have a url like `file:///path/to/remote/root`
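Beyond the CLI form, a minimal library-side sketch of consuming the disposable returned by `mount()`; the remote URL and VHD path are placeholders, and the package specifier `@vates/fuse-vhd` is assumed (the README above imports from `fuse-vhd`):

```js
import Disposable from 'promise-toolbox/Disposable'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { mount } from '@vates/fuse-vhd'

await Disposable.use(async function* () {
  // handler for the remote that stores the VHD (placeholder URL)
  const handler = yield getSyncedHandler({ url: 'file:///path/to/remote/root' })

  // mounts the VHD (the full chain for a differencing VHD) under /mnt/vhd;
  // it is unmounted automatically when this block exits
  yield mount(handler, 'path/to/disk.vhd', '/mnt/vhd')

  // ... read files from /mnt/vhd here ...
})
```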


@@ -1,59 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/fuse-vhd
[![Package Version](https://badgen.net/npm/v/@vates/fuse-vhd)](https://npmjs.org/package/@vates/fuse-vhd) ![License](https://badgen.net/npm/license/@vates/fuse-vhd) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/fuse-vhd)](https://bundlephobia.com/result?p=@vates/fuse-vhd) [![Node compatibility](https://badgen.net/npm/node/@vates/fuse-vhd)](https://npmjs.org/package/@vates/fuse-vhd)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/fuse-vhd):
```sh
npm install --save @vates/fuse-vhd
```
## Usage
Mount a vhd generated by xen-orchestra to filesystem
### Library
```js
import { mount } from 'fuse-vhd'
// return a disposable, see promise-toolbox/Disposable
// unmount automatically when disposable is disposed
// in case of differencing VHD, it mounts the full chain
await mount(handler, diskId, mountPoint)
```
### cli
From the install folder :
```
cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
```
After installing the package
```
xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
```
remoteUrl can be found by using cli in `@xen-orchestra/fs` , for example a local remote will have a url like `file:///path/to/remote/root`
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,26 +0,0 @@
#!/usr/bin/env node
import Disposable from 'promise-toolbox/Disposable'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { mount } from './index.mjs'
async function* main([remoteUrl, vhdPathInRemote, mountPoint]) {
if (mountPoint === undefined) {
throw new TypeError('missing arg: cli <remoteUrl> <vhdPathInRemote> <mountPoint>')
}
const handler = yield getSyncedHandler({ url: remoteUrl })
const mounted = await mount(handler, vhdPathInRemote, mountPoint)
let disposePromise
process.on('SIGINT', async () => {
// ensure single dispose
if (!disposePromise) {
disposePromise = mounted.dispose()
}
await disposePromise
process.exit()
})
}
Disposable.wrap(main)(process.argv.slice(2))


@@ -58,7 +58,7 @@ export const mount = Disposable.factory(async function* mount(handler, diskPath,
},
})
return new Disposable(
() => fromCallback(cb => fuse.unmount(cb)),
fromCallback(cb => fuse.mount(cb))
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})


@@ -1,6 +1,6 @@
{
"name": "@vates/fuse-vhd",
"version": "2.1.0",
"version": "1.0.0",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
@@ -19,14 +19,10 @@
},
"main": "./index.mjs",
"dependencies": {
"@xen-orchestra/fs": "^4.1.4",
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.9.0"
},
"bin": {
"xo-fuse-vhd": "./cli.mjs"
"vhd-lib": "^4.5.0"
},
"scripts": {
"postversion": "npm publish --access public"


@@ -17,14 +17,4 @@ map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined
map.delete([])
for (const [key, value] of map.entries() {
console.log(key, value)
}
for (const value of map.values()) {
console.log(value)
}
```


@@ -35,16 +35,6 @@ map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined
map.delete([])
for (const [key, value] of map.entries() {
console.log(key, value)
}
for (const value of map.values()) {
console.log(value)
}
```
## Contributions


@@ -36,31 +36,14 @@ function del(node, i, keys) {
return node
}
function* entries(node, key) {
if (node !== undefined) {
if (node instanceof Node) {
const { value } = node
if (value !== undefined) {
yield [key, node.value]
}
for (const [childKey, child] of node.children.entries()) {
yield* entries(child, key.concat(childKey))
}
} else {
yield [key, node]
}
}
}
function get(node, i, keys) {
return i === keys.length
? node instanceof Node
? node.value
: node
: node instanceof Node
? get(node.children.get(keys[i]), i + 1, keys)
: undefined
? get(node.children.get(keys[i]), i + 1, keys)
: undefined
}
function set(node, i, keys, value) {
@@ -86,22 +69,6 @@ function set(node, i, keys, value) {
return node
}
function* values(node) {
if (node !== undefined) {
if (node instanceof Node) {
const { value } = node
if (value !== undefined) {
yield node.value
}
for (const child of node.children.values()) {
yield* values(child)
}
} else {
yield node
}
}
}
exports.MultiKeyMap = class MultiKeyMap {
constructor() {
// each node is either a value or a Node if it contains children
@@ -112,10 +79,6 @@ exports.MultiKeyMap = class MultiKeyMap {
this._root = del(this._root, 0, keys)
}
entries() {
return entries(this._root, [])
}
get(keys) {
return get(this._root, 0, keys)
}
@@ -123,8 +86,4 @@ exports.MultiKeyMap = class MultiKeyMap {
set(keys, value) {
this._root = set(this._root, 0, keys, value)
}
values() {
return values(this._root)
}
}


@@ -19,7 +19,7 @@ describe('MultiKeyMap', () => {
// reverse composite key
['bar', 'foo'],
]
const values = keys.map(() => Math.random())
const values = keys.map(() => ({}))
// set all values first to make sure they are all stored and not only the
// last one
@@ -27,12 +27,6 @@ describe('MultiKeyMap', () => {
map.set(key, values[i])
})
assert.deepEqual(
Array.from(map.entries()),
keys.map((key, i) => [key, values[i]])
)
assert.deepEqual(Array.from(map.values()), values)
keys.forEach((key, i) => {
// copy the key to make sure the array itself is not the key
assert.strictEqual(map.get(key.slice()), values[i])


@@ -18,7 +18,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.2.0",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},


@@ -1,9 +1,10 @@
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
import { fromCallback, pRetry, pDelay, pTimeout } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import { createLogger } from '@xen-orchestra/log'
import {
INIT_PASSWD,
NBD_CMD_READ,
@@ -20,6 +21,7 @@ import {
OPTS_MAGIC,
NBD_CMD_DISC,
} from './constants.mjs'
const { warn } = createLogger('vates:nbd-client')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
@@ -38,7 +40,6 @@ export default class NbdClient {
#readBlockRetries
#reconnectRetry
#connectTimeout
#messageTimeout
// AFAIK, there is no guaranty the server answers in the same order as the queries
// so we handle a backlog of command waiting for response and handle concurrency manually
@@ -51,14 +52,7 @@ export default class NbdClient {
#reconnectingPromise
constructor(
{ address, port = NBD_DEFAULT_PORT, exportname, cert },
{
connectTimeout = 6e4,
messageTimeout = 6e4,
waitBeforeReconnect = 1e3,
readAhead = 10,
readBlockRetries = 5,
reconnectRetry = 5,
} = {}
{ connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
) {
this.#serverAddress = address
this.#serverPort = port
@@ -69,7 +63,6 @@ export default class NbdClient {
this.#readBlockRetries = readBlockRetries
this.#reconnectRetry = reconnectRetry
this.#connectTimeout = connectTimeout
this.#messageTimeout = messageTimeout
}
get exportSize() {
@@ -122,27 +115,13 @@ export default class NbdClient {
if (!this.#connected) {
return
}
this.#connected = false
const socket = this.#serverSocket
const queryId = this.#nextCommandQueryId
this.#nextCommandQueryId++
const buffer = Buffer.alloc(28)
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
buffer.writeInt16BE(0, 4) // no command flags for a disconnect
buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
buffer.writeBigUInt64BE(queryId, 8)
buffer.writeBigUInt64BE(0n, 16)
buffer.writeInt32BE(0, 24)
const promise = pFromCallback(cb => {
socket.end(buffer, 'utf8', cb)
})
try {
await pTimeout.call(promise, this.#messageTimeout)
} catch (error) {
socket.destroy()
}
await this.#write(buffer)
await this.#serverSocket.destroy()
this.#serverSocket = undefined
this.#connected = false
}
@@ -216,13 +195,11 @@ export default class NbdClient {
}
#read(length) {
const promise = readChunkStrict(this.#serverSocket, length)
return pTimeout.call(promise, this.#messageTimeout)
return readChunkStrict(this.#serverSocket, length)
}
#write(buffer) {
const promise = fromCallback.call(this.#serverSocket, 'write', buffer)
return pTimeout.call(promise, this.#messageTimeout)
return fromCallback.call(this.#serverSocket, 'write', buffer)
}
async #readInt32() {
@@ -255,20 +232,19 @@ export default class NbdClient {
}
try {
this.#waitingForResponse = true
const buffer = await this.#read(16)
const magic = buffer.readInt32BE(0)
const magic = await this.#readInt32()
if (magic !== NBD_REPLY_MAGIC) {
throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
}
const error = buffer.readInt32BE(4)
const error = await this.#readInt32()
if (error !== 0) {
// @todo use error code from constants.mjs
throw new Error(`GOT ERROR CODE : ${error}`)
}
const blockQueryId = buffer.readBigUInt64BE(8)
const blockQueryId = await this.#readInt64()
const query = this.#commandQueryBacklog.get(blockQueryId)
if (!query) {
throw new Error(` no query associated with id ${blockQueryId}`)
@@ -289,7 +265,7 @@ export default class NbdClient {
}
}
async #readBlock(index, size) {
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
// we don't want to add anything in backlog while reconnecting
if (this.#reconnectingPromise) {
await this.#reconnectingPromise
@@ -305,13 +281,7 @@ export default class NbdClient {
buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
buffer.writeBigUInt64BE(queryId, 8)
// byte offset in the raw disk
const offset = BigInt(index) * BigInt(size)
const remaining = this.#exportSize - offset
if (remaining < BigInt(size)) {
size = Number(remaining)
}
buffer.writeBigUInt64BE(offset, 16)
buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
buffer.writeInt32BE(size, 24)
return new Promise((resolve, reject) => {
@@ -337,13 +307,45 @@ export default class NbdClient {
})
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
return pRetry(() => this.#readBlock(index, size), {
tries: this.#readBlockRetries,
onRetry: async err => {
warn('will retry reading block ', index, err)
await this.reconnect()
},
})
async *readBlocks(indexGenerator) {
// default : read all blocks
if (indexGenerator === undefined) {
const exportSize = this.#exportSize
const chunkSize = 2 * 1024 * 1024
indexGenerator = function* () {
const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
for (let index = 0; BigInt(index) < nbBlocks; index++) {
yield { index, size: chunkSize }
}
}
}
const readAhead = []
const readAheadMaxLength = this.#readAhead
const makeReadBlockPromise = (index, size) => {
const promise = pRetry(() => this.readBlock(index, size), {
tries: this.#readBlockRetries,
onRetry: async err => {
warn('will retry reading block ', index, err)
await this.reconnect()
},
})
// error is handled during unshift
promise.catch(() => {})
return promise
}
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
for (const { index, size } of indexGenerator()) {
// stack readAheadMaxLength promises before starting to handle the results
if (readAhead.length === readAheadMaxLength) {
// any error will stop reading blocks
yield readAhead.shift()
}
readAhead.push(makeReadBlockPromise(index, size))
}
while (readAhead.length > 0) {
yield readAhead.shift()
}
}
}
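For orientation, a small consumption sketch of the `readBlocks()` read-ahead iterator shown in the hunk above; the package specifier, export name and chunk size are illustrative, not taken from this diff:

```js
import NbdClient from '@vates/nbd-client'

const client = new NbdClient(
  { address: '127.0.0.1', exportname: 'MY_EXPORT' }, // `cert` can be added for TLS
  { readAhead: 4, readBlockRetries: 3 }
)
await client.connect()

// iterate over the export in 2 MiB chunks via a custom index generator;
// calling readBlocks() with no argument uses the default generator shown above
const size = 2 * 1024 * 1024
const nbBlocks = Math.ceil(Number(client.exportSize) / size)
for await (const block of client.readBlocks(function* () {
  for (let index = 0; index < nbBlocks; index++) {
    yield { index, size }
  }
})) {
  // `block` is a Buffer of up to `size` bytes
}

await client.disconnect()
```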


@@ -1,85 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { NBD_DEFAULT_BLOCK_SIZE } from './constants.mjs'
import NbdClient from './index.mjs'
import { createLogger } from '@xen-orchestra/log'
const { warn } = createLogger('vates:nbd-client:multi')
export default class MultiNbdClient {
#clients = []
#readAhead
get exportSize() {
return this.#clients[0].exportSize
}
constructor(settings, { nbdConcurrency = 8, readAhead = 16, ...options } = {}) {
this.#readAhead = readAhead
if (!Array.isArray(settings)) {
settings = [settings]
}
for (let i = 0; i < nbdConcurrency; i++) {
this.#clients.push(
new NbdClient(settings[i % settings.length], { ...options, readAhead: Math.ceil(readAhead / nbdConcurrency) })
)
}
}
async connect() {
const connectedClients = []
for (const clientId in this.#clients) {
const client = this.#clients[clientId]
try {
await client.connect()
connectedClients.push(client)
} catch (err) {
client.disconnect().catch(() => {})
warn(`can't connect to one nbd client`, { err })
}
}
if (connectedClients.length === 0) {
throw new Error(`Fail to connect to any Nbd client`)
}
if (connectedClients.length < this.#clients.length) {
warn(
`incomplete connection by multi Nbd, only ${connectedClients.length} over ${this.#clients.length} expected clients`
)
this.#clients = connectedClients
}
}
async disconnect() {
await asyncEach(this.#clients, client => client.disconnect(), {
stopOnError: false,
})
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
const clientId = index % this.#clients.length
return this.#clients[clientId].readBlock(index, size)
}
async *readBlocks(indexGenerator) {
// default : read all blocks
const readAhead = []
const makeReadBlockPromise = (index, size) => {
const promise = this.readBlock(index, size)
// error is handled during unshift
promise.catch(() => {})
return promise
}
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
for (const { index, size } of indexGenerator()) {
// stack readAheadMaxLength promises before starting to handle the results
if (readAhead.length === this.#readAhead) {
// any error will stop reading blocks
yield readAhead.shift()
}
readAhead.push(makeReadBlockPromise(index, size))
}
while (readAhead.length > 0) {
yield readAhead.shift()
}
}
}


@@ -13,25 +13,25 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "3.0.0",
"version": "1.2.1",
"engines": {
"node": ">=14.0"
},
"main": "./index.mjs",
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/read-chunk": "^1.2.0",
"@vates/read-chunk": "^1.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.6.0",
"promise-toolbox": "^0.21.0",
"xen-api": "^2.0.1"
"xen-api": "^1.3.3"
},
"devDependencies": {
"tap": "^18.7.0",
"tap": "^16.3.0",
"tmp": "^0.2.1"
},
"scripts": {
"postversion": "npm publish --access public",
"test-integration": "tap --allow-incomplete-coverage"
"test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
}
}


@@ -1,3 +1,4 @@
import NbdClient from '../index.mjs'
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
@@ -6,10 +7,8 @@ import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'
import MultiNbdClient from '../multi.mjs'
const CHUNK_SIZE = 1024 * 1024 // non default size
const FILE_SIZE = 1024 * 1024 * 9.5 // non aligned file size
const FILE_SIZE = 10 * 1024 * 1024
async function createTempFile(size) {
const tmpPath = await pFromCallback(cb => tmp.file(cb))
@@ -82,7 +81,7 @@ test('it works with unsecured network', async tap => {
const path = await createTempFile(FILE_SIZE)
let nbdServer = await spawnNbdKit(path)
const client = new MultiNbdClient(
const client = new NbdClient(
{
address: '127.0.0.1',
exportname: 'MY_SECRET_EXPORT',
@@ -110,13 +109,13 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
`,
},
{
nbdConcurrency: 1,
readAhead: 2,
}
)
await client.connect()
tap.equal(client.exportSize, BigInt(FILE_SIZE))
const CHUNK_SIZE = 1024 * 1024 // non default size
const indexes = []
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
indexes.push(i)
@@ -128,9 +127,9 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
})
let i = 0
for await (const block of nbdIterator) {
let blockOk = block.length === Math.min(CHUNK_SIZE, FILE_SIZE - CHUNK_SIZE * i)
let blockOk = true
let firstFail
for (let j = 0; j < block.length; j += 4) {
for (let j = 0; j < CHUNK_SIZE; j += 4) {
const wanted = i * CHUNK_SIZE + j
const found = block.readUInt32BE(j)
blockOk = blockOk && found === wanted
@@ -138,7 +137,7 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
firstFail = j
}
}
tap.ok(blockOk, `check block ${i} content ${block.length}`)
tap.ok(blockOk, `check block ${i} content`)
i++
// flaky server is flaky
@@ -148,6 +147,17 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
nbdServer = await spawnNbdKit(path)
}
}
// we can reuse the conneciton to read other blocks
// default iterator
const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
let nb = 0
for await (const block of nbdIteratorWithDefaultBlockIterator) {
nb++
tap.equal(block.length, 2 * 1024 * 1024)
}
tap.equal(nb, 5)
assert.rejects(() => client.readBlock(100, CHUNK_SIZE))
await client.disconnect()


@@ -1,6 +1,6 @@
{
"name": "@vates/node-vsphere-soap",
"version": "2.0.0",
"version": "1.0.0",
"description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
"main": "lib/client.mjs",
"author": "reedog117",


@@ -1,5 +1,5 @@
import { strict as assert } from 'node:assert'
import test from 'test'
import { describe, it } from 'tap/mocha'
import {
generateHotp,
@@ -11,8 +11,6 @@ import {
verifyTotp,
} from './index.mjs'
const { describe, it } = test
describe('generateSecret', function () {
it('generates a string of 32 chars', async function () {
const secret = generateSecret()


@@ -31,9 +31,9 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"test": "tap"
},
"devDependencies": {
"test": "^3.3.0"
"tap": "^16.3.0"
}
}


@@ -1,7 +1,7 @@
'use strict'
const assert = require('assert/strict')
const { describe, it } = require('test')
const { describe, it } = require('tap').mocha
const { every, not, some } = require('./')


@@ -32,9 +32,9 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"test": "tap"
},
"devDependencies": {
"test": "^3.3.0"
"tap": "^16.0.1"
}
}


@@ -1,7 +1,6 @@
'use strict'
const assert = require('assert')
const isUtf8 = require('isutf8')
/**
* Read a chunk of data from a stream.
@@ -22,41 +21,41 @@ const readChunk = (stream, size) =>
stream.errored != null
? Promise.reject(stream.errored)
: stream.closed || stream.readableEnded
? Promise.resolve(null)
: new Promise((resolve, reject) => {
if (size !== undefined) {
assert(size > 0)
? Promise.resolve(null)
: new Promise((resolve, reject) => {
if (size !== undefined) {
assert(size > 0)
// per Node documentation:
// > The size argument must be less than or equal to 1 GiB.
assert(size < 1073741824)
}
// per Node documentation:
// > The size argument must be less than or equal to 1 GiB.
assert(size < 1073741824)
}
function onEnd() {
resolve(null)
function onEnd() {
resolve(null)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read(size)
if (data !== null) {
resolve(data)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read(size)
if (data !== null) {
resolve(data)
removeListeners()
}
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
exports.readChunk = readChunk
/**
@@ -82,13 +81,6 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
if (size !== undefined && chunk.length !== size) {
const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
// Buffer.isUtf8 is too recent for now
// @todo : replace external package by Buffer.isUtf8 when the supported version of node reach 18
if (chunk.length < 1024 && isUtf8(chunk)) {
error.text = chunk.toString('utf8')
}
Object.defineProperties(error, {
chunk: {
value: chunk,
@@ -111,42 +103,42 @@ async function skip(stream, size) {
return stream.errored != null
? Promise.reject(stream.errored)
: size === 0 || stream.closed || stream.readableEnded
? Promise.resolve(0)
: new Promise((resolve, reject) => {
let left = size
function onEnd() {
resolve(size - left)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read()
left -= data === null ? 0 : data.length
if (left > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (left < 0) {
stream.unshift(data.slice(left))
}
resolve(size)
removeListeners()
? Promise.resolve(0)
: new Promise((resolve, reject) => {
let left = size
function onEnd() {
resolve(size - left)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read()
left -= data === null ? 0 : data.length
if (left > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (left < 0) {
stream.unshift(data.slice(left))
}
resolve(size)
removeListeners()
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
exports.skip = skip
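A short usage sketch of two of the helpers exported by this file; the file path and sizes are illustrative:

```js
import { createReadStream } from 'node:fs'
import { readChunkStrict, skip } from '@vates/read-chunk'

async function readHeader(path) {
  const stream = createReadStream(path)
  await skip(stream, 4) // ignore a 4-byte magic number
  // rejects with "stream has ended with not enough data" if fewer than 16 bytes remain
  return await readChunkStrict(stream, 16)
}
```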


@@ -102,37 +102,12 @@ describe('readChunkStrict', function () {
assert.strictEqual(error.chunk, undefined)
})
it('throws if stream ends with not enough data, utf8', async () => {
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
assert.strictEqual(error.text, 'foobar')
assert.deepEqual(error.chunk, Buffer.from('foobar'))
})
it('throws if stream ends with not enough data, non utf8 ', async () => {
const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
assert.strictEqual(error.text, undefined)
assert.deepEqual(error.chunk, Buffer.concat(source))
})
it('throws if stream ends with not enough data, utf8 , long data', async () => {
const source = Buffer.from('a'.repeat(1500))
const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
assert(error instanceof Error)
assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
assert.strictEqual(error.text, undefined)
assert.deepEqual(error.chunk, source)
})
it('succeed', async () => {
const source = Buffer.from('a'.repeat(20))
const chunk = await readChunkStrict(makeStream([source]), 10)
assert.deepEqual(source.subarray(10), chunk)
})
})
describe('skip', function () {
@@ -159,16 +134,6 @@ describe('skip', function () {
it('returns less size if stream ends', async () => {
assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
})
it('put back if it read too much', async () => {
let source = makeStream(['foo', 'bar'])
await skip(source, 1) // read part of data chunk
const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
assert.strictEqual(chunk, 'oo')
source = makeStream(['foo', 'bar'])
assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
})
})
describe('skipStrict', function () {
@@ -179,9 +144,4 @@ describe('skipStrict', function () {
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
assert.deepEqual(error.bytesSkipped, 7)
})
it('succeed', async () => {
const source = makeStream(['foo', 'bar', 'baz'])
const res = await skipStrict(source, 4)
assert.strictEqual(res, undefined)
})
})


@@ -19,7 +19,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.2.0",
"version": "1.1.1",
"engines": {
"node": ">=8.10"
},
@@ -33,8 +33,5 @@
},
"devDependencies": {
"test": "^3.2.1"
},
"dependencies": {
"isutf8": "^4.0.0"
}
}


@@ -27,7 +27,7 @@
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=12.3"
"node": ">=10"
},
"scripts": {
"postversion": "npm publish --access public",


@@ -123,7 +123,7 @@ const onProgress = makeOnProgress({
onTaskUpdate(taskLog) {},
})
Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```
It can also be fed event logs directly:


@@ -111,7 +111,7 @@ const onProgress = makeOnProgress({
// current status of the task as described in the previous section
taskLog.status
// undefined or a dictionary of properties attached to the task
// undefined or a dictionnary of properties attached to the task
taskLog.properties
// timestamp at which the abortion was requested, undefined otherwise
@@ -139,7 +139,7 @@ const onProgress = makeOnProgress({
onTaskUpdate(taskLog) {},
})
Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```
It can also be fed event logs directly:


@@ -35,7 +35,7 @@
"test": "node--test"
},
"devDependencies": {
"sinon": "^17.0.1",
"sinon": "^15.0.1",
"test": "^3.2.1"
}
}


@@ -1,7 +1,7 @@
'use strict'
const assert = require('assert/strict')
const { afterEach, describe, it } = require('test')
const { afterEach, describe, it } = require('tap').mocha
const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')


@@ -13,10 +13,10 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"test": "tap --lines 67 --functions 92 --branches 52 --statements 67"
},
"dependencies": {
"@vates/decorate-with": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@xen-orchestra/log": "^0.6.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
@@ -28,6 +28,6 @@
"url": "https://vates.fr"
},
"devDependencies": {
"test": "^3.3.0"
"tap": "^16.0.1"
}
}


@@ -7,9 +7,9 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.44.6",
"@xen-orchestra/fs": "^4.1.4",
"filenamify": "^6.0.0",
"@xen-orchestra/backups": "^0.39.0",
"@xen-orchestra/fs": "^4.0.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0"
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "1.0.14",
"version": "1.0.9",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",


@@ -4,229 +4,23 @@ import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
import { VhdNegative, VhdSynthetic } from 'vhd-lib'
import { decorateClass } from '@vates/decorate-with'
import { createLogger } from '@xen-orchestra/log'
import { dirname, join } from 'node:path'
import pickBy from 'lodash/pickBy.js'
import { defer } from 'golike-defer'
const { debug, info, warn } = createLogger('xo:backups:importVmBackup')
async function resolveUuid(xapi, cache, uuid, type) {
if (uuid == null) {
return uuid
}
const ref = cache.get(uuid)
if (ref === undefined) {
cache.set(uuid, xapi.call(`${type}.get_by_uuid`, uuid))
}
return cache.get(uuid)
}
export class ImportVmBackup {
constructor({
adapter,
metadata,
srUuid,
xapi,
settings: { additionnalVmTag, newMacAddresses, mapVdisSrs = {}, useDifferentialRestore = false } = {},
}) {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
this._adapter = adapter
this._importIncrementalVmSettings = { additionnalVmTag, newMacAddresses, mapVdisSrs, useDifferentialRestore }
this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
this._metadata = metadata
this._srUuid = srUuid
this._xapi = xapi
}
async #getPathOfVdiSnapshot(snapshotUuid) {
const metadata = this._metadata
if (this._pathToVdis === undefined) {
const backups = await this._adapter.listVmBackups(
this._metadata.vm.uuid,
({ mode, timestamp }) => mode === 'delta' && timestamp >= metadata.timestamp
)
const map = new Map()
for (const backup of backups) {
for (const [vdiRef, vdi] of Object.entries(backup.vdis)) {
map.set(vdi.uuid, backup.vhds[vdiRef])
}
}
this._pathToVdis = map
}
return this._pathToVdis.get(snapshotUuid)
}
async _reuseNearestSnapshot($defer, ignoredVdis) {
const metadata = this._metadata
const { mapVdisSrs } = this._importIncrementalVmSettings
const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
const streams = {}
const metdataDir = dirname(metadata._filename)
const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))
for (const [vdiRef, vdi] of Object.entries(vdis)) {
const vhdPath = join(metdataDir, vhds[vdiRef])
let xapiDisk
try {
xapiDisk = await this._xapi.getRecordByUuid('VDI', vdi.$snapshot_of$uuid)
} catch (err) {
// if this disk is not present anymore, fall back to default restore
warn(err)
}
let snapshotCandidate, backupCandidate
if (xapiDisk !== undefined) {
debug('found disks, wlll search its snapshots', { snapshots: xapiDisk.snapshots })
for (const snapshotRef of xapiDisk.snapshots) {
const snapshot = await this._xapi.getRecord('VDI', snapshotRef)
debug('handling snapshot', { snapshot })
// take only the first snapshot
if (snapshotCandidate && snapshotCandidate.snapshot_time < snapshot.snapshot_time) {
debug('already got a better candidate')
continue
}
// have a corresponding backup more recent than metadata ?
const pathToSnapshotData = await this.#getPathOfVdiSnapshot(snapshot.uuid)
if (pathToSnapshotData === undefined) {
debug('no backup linked to this snaphot')
continue
}
if (snapshot.$SR.uuid !== (mapVdisSrs[vdi.$snapshot_of$uuid] ?? this._srUuid)) {
debug('not restored on the same SR', { snapshotSr: snapshot.$SR.uuid, mapVdisSrs, srUuid: this._srUuid })
continue
}
debug('got a candidate', pathToSnapshotData)
snapshotCandidate = snapshot
backupCandidate = pathToSnapshotData
}
}
let stream
const backupWithSnapshotPath = join(metdataDir, backupCandidate ?? '')
if (vhdPath === backupWithSnapshotPath) {
// all the data are already on the host
debug('direct reuse of a snapshot')
stream = null
vdis[vdiRef].baseVdi = snapshotCandidate
// go next disk , we won't use this stream
continue
}
let disposableDescendants
const disposableSynthetic = await VhdSynthetic.fromVhdChain(this._adapter._handler, vhdPath)
// this will also clean if another disk of this VM backup fails
// if user really only need to restore non failing disks he can retry with ignoredVdis
let disposed = false
const disposeOnce = async () => {
if (!disposed) {
disposed = true
try {
await disposableDescendants?.dispose()
await disposableSynthetic?.dispose()
} catch (error) {
warn('openVhd: failed to dispose VHDs', { error })
}
}
}
$defer.onFailure(() => disposeOnce())
const parentVhd = disposableSynthetic.value
await parentVhd.readBlockAllocationTable()
debug('got vhd synthetic of parents', parentVhd.length)
if (snapshotCandidate !== undefined) {
try {
debug('will try to use differential restore', {
backupWithSnapshotPath,
vhdPath,
vdiRef,
})
disposableDescendants = await VhdSynthetic.fromVhdChain(this._adapter._handler, backupWithSnapshotPath, {
until: vhdPath,
})
const descendantsVhd = disposableDescendants.value
await descendantsVhd.readBlockAllocationTable()
debug('got vhd synthetic of descendants')
const negativeVhd = new VhdNegative(parentVhd, descendantsVhd)
debug('got vhd negative')
// update the stream with the negative vhd stream
stream = await negativeVhd.stream()
vdis[vdiRef].baseVdi = snapshotCandidate
} catch (error) {
// can be a broken VHD chain, a vhd chain with a key backup, ....
// not an irrecuperable error, don't dispose parentVhd, and fallback to full restore
warn(`can't use differential restore`, { error })
disposableDescendants?.dispose()
}
}
// didn't make a negative stream : fallback to classic stream
if (stream === undefined) {
debug('use legacy restore')
stream = await parentVhd.stream()
}
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
stream.on('error', disposeOnce)
info('everything is ready, will transfer', stream.length)
streams[`${vdiRef}.vhd`] = stream
}
return {
streams,
vbds,
vdis,
version: '1.0.0',
vifs,
vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
}
}
async #decorateIncrementalVmMetadata() {
const { additionnalVmTag, mapVdisSrs, useDifferentialRestore } = this._importIncrementalVmSettings
const ignoredVdis = new Set(
Object.entries(mapVdisSrs)
.filter(([_, srUuid]) => srUuid === null)
.map(([vdiUuid]) => vdiUuid)
)
let backup
if (useDifferentialRestore) {
backup = await this._reuseNearestSnapshot(ignoredVdis)
} else {
backup = await this._adapter.readIncrementalVmBackup(this._metadata, ignoredVdis)
}
const xapi = this._xapi
const cache = new Map()
const mapVdisSrRefs = {}
if (additionnalVmTag !== undefined) {
backup.vm.tags.push(additionnalVmTag)
}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}
const srRef = await resolveUuid(xapi, cache, this._srUuid, 'SR')
Object.values(backup.vdis).forEach(vdi => {
vdi.SR = mapVdisSrRefs[vdi.uuid] ?? srRef
})
return backup
}
async run() {
const adapter = this._adapter
const metadata = this._metadata
const isFull = metadata.mode === 'full'
const sizeContainer = { size: 0 }
const { newMacAddresses } = this._importIncrementalVmSettings
let backup
if (isFull) {
backup = await adapter.readFullVmBackup(metadata)
@@ -234,7 +28,12 @@ export class ImportVmBackup {
} else {
assert.strictEqual(metadata.mode, 'delta')
backup = await this.#decorateIncrementalVmMetadata()
const ignoredVdis = new Set(
Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
.filter(([_, srUuid]) => srUuid === null)
.map(([vdiUuid]) => vdiUuid)
)
backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
}
@@ -249,7 +48,8 @@ export class ImportVmBackup {
const vmRef = isFull
? await xapi.VM_import(backup, srRef)
: await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
newMacAddresses,
...this._importIncrementalVmSettings,
detectBase: false,
})
await Promise.all([
@@ -259,13 +59,6 @@ export class ImportVmBackup {
vmRef,
`${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
),
xapi.call(
'VM.set_name_description',
vmRef,
`Restored on ${formatFilenameDate(+new Date())} from ${adapter._handler._remote.name} -
${metadata.vm.name_description}
`
),
])
return {
@@ -276,5 +69,3 @@ export class ImportVmBackup {
)
}
}
decorateClass(ImportVmBackup, { _reuseNearestSnapshot: defer })


@@ -5,7 +5,7 @@ import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
import { dirname, join, normalize, resolve } from 'node:path'
import { execFile } from 'child_process'
import { mount } from '@vates/fuse-vhd'
import { readdir, lstat } from 'node:fs/promises'
@@ -18,7 +18,6 @@ import fromEvent from 'promise-toolbox/fromEvent'
import groupBy from 'lodash/groupBy.js'
import pDefer from 'promise-toolbox/defer'
import pickBy from 'lodash/pickBy.js'
import tar from 'tar'
import zlib from 'zlib'
import { BACKUP_DIR } from './_getVmBackupDir.mjs'
@@ -35,8 +34,6 @@ export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
const IMMUTABILTY_METADATA_FILENAME = '/immutability.json'
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -44,23 +41,20 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const noop = Function.prototype
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))
async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
for (const relativePath of relativePaths) {
const realPath = join(realBasePath, relativePath)
const virtualPath = join(virtualBasePath, relativePath)
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const stats = await lstat(realPath)
const { mode, mtime } = stats
const opts = { mode, mtime }
if (stats.isDirectory()) {
zip.addEmptyDirectory(virtualPath, opts)
await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
} else if (stats.isFile()) {
zip.addFile(realPath, virtualPath, opts)
}
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
files.push({
realPath,
metadataPath,
})
}
}
@@ -188,41 +182,42 @@ export class RemoteAdapter {
})
}
async *_usePartitionFiles(diskId, partitionId, paths) {
const path = yield this.getPartition(diskId, partitionId)
const files = []
await asyncMap(paths, file =>
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
)
return files
}
// check if we will be allowed to merge a a vhd created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(VhdSynthetic.fromVhdChain(this.handler, path), vhd => {
return await Disposable.use(openVhd(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}
// check if all the chain is composed of vhd directory
const isVhdDirectory = vhd.checkVhdsClass(VhdDirectory)
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.useVhdDirectory()
})
}
fetchPartitionFiles(diskId, partitionId, paths, format) {
fetchPartitionFiles(diskId, partitionId, paths) {
const { promise, reject, resolve } = pDefer()
Disposable.use(
async function* () {
const path = yield this.getPartition(diskId, partitionId)
let outputStream
if (format === 'tgz') {
outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
} else if (format === 'zip') {
const zip = new ZipFile()
await addZipEntries(zip, path, '', paths.map(makeRelative))
zip.end()
;({ outputStream } = zip)
} else {
throw new Error('unsupported format ' + format)
}
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
const zip = new ZipFile()
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
zip.end()
const { outputStream } = zip
resolve(outputStream)
await fromEvent(outputStream, 'end')
}.bind(this)
@@ -665,13 +660,14 @@ export class RemoteAdapter {
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, dedup = false } = {}) {
const handler = this._handler
if (this.useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
compression: this.#getCompressionType(),
dedup,
async validator() {
await input.task
return validator.apply(this, arguments)
@@ -684,13 +680,11 @@ export class RemoteAdapter {
}
}
async outputStream(path, input, { checksum = true, maxStreamLength, streamLength, validator = noop } = {}) {
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
const container = watchStreamSize(input)
await this._handler.outputStream(path, input, {
checksum,
dirMode: this._dirMode,
maxStreamLength,
streamLength,
async validator() {
await input.task
return validator.apply(this, arguments)
@@ -752,37 +746,10 @@ export class RemoteAdapter {
}
async readVmBackupMetadata(path) {
let json
let isImmutable = false
let remoteIsImmutable = false
// if the remote is immutable, check if this metadatas are also immutables
try {
// this file is not encrypted
await this._handler._readFile(IMMUTABILTY_METADATA_FILENAME)
remoteIsImmutable = true
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
try {
// this will trigger an EPERM error if the file is immutable
json = await this.handler.readFile(path, { flag: 'r+' })
// s3 handler don't respect flags
} catch (err) {
// retry without triggerring immutbaility check ,only on immutable remote
if (err.code === 'EPERM' && remoteIsImmutable) {
isImmutable = true
json = await this._handler.readFile(path, { flag: 'r' })
} else {
throw err
}
}
// _filename is a private field used to compute the backup id
//
// it's enumerable to make it cacheable
const metadata = { ...JSON.parse(json), _filename: path, isImmutable }
const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
// backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
if (typeof metadata.vm.is_a_template === 'number') {
@@ -858,6 +825,8 @@ decorateMethodsWith(RemoteAdapter, {
debounceResourceFactory,
]),
_usePartitionFiles: Disposable.factory,
getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),
getPartition: Disposable.factory,


@@ -21,12 +21,7 @@ export class RestoreMetadataBackup {
})
} else {
const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
const dataFileName = resolve('/', backupId, metadata.data ?? 'data.json').slice(1)
const data = await handler.readFile(dataFileName)
// if data is JSON, sent it as a plain string, otherwise, consider the data as binary and encode it
const isJson = dataFileName.endsWith('.json')
return isJson ? data.toString() : { encoding: 'base64', data: data.toString('base64') }
return String(await handler.readFile(resolve(backupId, metadata.data ?? 'data.json')))
}
}
}


@@ -67,11 +67,6 @@ async function generateVhd(path, opts = {}) {
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
}
if (opts.blocks) {
for (const blockId of opts.blocks) {
await vhd.writeEntireBlock({ id: blockId, buffer: Buffer.alloc(2 * 1024 * 1024 + 512, blockId) })
}
}
await vhd.writeBlockAllocationTable()
await vhd.writeHeader()
await vhd.writeFooter()
@@ -235,7 +230,7 @@ test('it merges delta of non destroyed chain', async () => {
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children after the merge
assert.equal(metadata.size, 104960)
assert.equal(metadata.size, 209920)
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
// only check deletion
@@ -325,7 +320,6 @@ describe('tests multiple combination ', () => {
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
mode: vhdMode,
blocks: [1, 3],
})
const child = await generateVhd(`${basePath}/child.vhd`, {
useAlias,
@@ -334,7 +328,6 @@ describe('tests multiple combination ', () => {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: ancestor.footer.uuid,
},
blocks: [1, 2],
})
// a grand child vhd in metadata
await generateVhd(`${basePath}/grandchild.vhd`, {
@@ -344,7 +337,6 @@ describe('tests multiple combination ', () => {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: child.footer.uuid,
},
blocks: [2, 3],
})
// an older parent that was merging in clean
@@ -403,7 +395,7 @@ describe('tests multiple combination ', () => {
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children + clean after the merge
assert.deepEqual(metadata.size, vhdMode === 'file' ? 6502400 : 6501888)
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
// broken, non-referenced or abandoned VHDs should be deleted (alias and data)
// ancestor and child should be merged

View File

@@ -36,32 +36,34 @@ const computeVhdsSize = (handler, vhdPaths) =>
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
logInfo(`merging VHD chain`, { chain })
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) {
logInfo(`merging VHD chain`, { chain })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
@@ -121,19 +123,19 @@ export async function checkAliases(
) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
}
continue
}
let target
try {
target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
}
continue
}
const { dispose } = await openVhd(handler, target)
try {
await dispose()
@@ -469,20 +471,23 @@ export async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
}
await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
if (remove) {
@@ -504,11 +509,12 @@ export async function cleanVm(
// update size for delta metadata with merged VHD
// check for the other that the size is the same as the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = backups.get(metadataPath)
let fileSystemSize
const mergedSize = metadataWithMergedVhd[metadataPath]
const merged = metadataWithMergedVhd[metadataPath] !== undefined
const { mode, size, vhds, xva } = metadata
@@ -518,29 +524,26 @@ export async function cleanVm(
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
if (fileSystemSize !== size && fileSystemSize !== undefined) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}
// don't warn if the size has changed after a merge
if (mergedSize === undefined) {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize !== undefined && fileSystemSize !== size) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
if (!merged && fileSystemSize !== size) {
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
console.warn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
} catch (error) {
@@ -548,19 +551,9 @@ export async function cleanVm(
return
}
// systematically update size and differentials after a merge
// @todo : after 2024-04-01 remove the fixmetadata options since the size computation is fixed
if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
metadata.size = mergedSize ?? fileSystemSize ?? size
if (mergedSize) {
// all disks are now key disk
metadata.isVhdDifferencing = {}
for (const id of Object.values(metadata.vdis ?? {})) {
metadata.isVhdDifferencing[`${id}.vhd`] = false
}
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
mustRegenerateCache = true
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })

View File

@@ -1,3 +1,4 @@
import find from 'lodash/find.js'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import omit from 'lodash/omit.js'
@@ -11,18 +12,22 @@ import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'
// in `other_config` of an incrementally replicated VM, contains the UUID of the source VM
export const TAG_BASE_DELTA = 'xo:base_delta'
// in `other_config` of an incrementally replicated VM, contains the UUID of the target SR used for replication
//
// added after the complete replication
export const TAG_BACKUP_SR = 'xo:backup:sr'
// in other_config of VDIs of an incrementally replicated VM, contains the UUID of the source VDI
export const TAG_COPY_SRC = 'xo:copy_of'
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
const resolveUuid = async (xapi, cache, uuid, type) => {
if (uuid == null) {
return uuid
}
let ref = cache.get(uuid)
if (ref === undefined) {
ref = await xapi.call(`${type}.get_by_uuid`, uuid)
cache.set(uuid, ref)
}
return ref
}
export async function exportIncrementalVm(
vm,
@@ -34,8 +39,6 @@ export async function exportIncrementalVm(
fullVdisRequired = new Set(),
disableBaseTags = false,
nbdConcurrency = 1,
preferNbd,
} = {}
) {
// refs of VM's VDIs → base's VDIs.
@@ -83,8 +86,6 @@ export async function exportIncrementalVm(
baseRef: baseVdi?.$ref,
cancelToken,
format: 'vhd',
nbdConcurrency,
preferNbd,
})
})
@@ -142,7 +143,7 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
$defer,
incrementalVm,
sr,
{ cancelToken = CancelToken.none, newMacAddresses = false } = {}
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
) {
const { version } = incrementalVm
if (compareVersions(version, '1.0.0') < 0) {
@@ -152,6 +153,32 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vmRecord = incrementalVm.vm
const xapi = sr.$xapi
let baseVm
if (detectBase) {
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
}
const cache = new Map()
const mapVdisSrRefs = {}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
const vdiRecords = incrementalVm.vdis
// 0. Create suspend_VDI
@@ -163,7 +190,18 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
})
} else {
suspendVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
suspendVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => suspendVdi.$destroy())
}
}
@@ -181,6 +219,10 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
ha_always_run: false,
is_a_template: false,
name_label: '[Importing…] ' + vmRecord.name_label,
other_config: {
...vmRecord.other_config,
[TAG_COPY_SRC]: vmRecord.uuid,
},
},
{
bios_strings: vmRecord.bios_strings,
@@ -201,8 +243,14 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vdi = vdiRecords[vdiRef]
let newVdi
if (vdi.baseVdi !== undefined) {
newVdi = await xapi.getRecord('VDI', await vdi.baseVdi.$clone())
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (remoteBaseVdiUuid) {
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
$defer.onFailure(() => newVdi.$destroy())
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
@@ -213,7 +261,18 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
// suspendVDI has already created
newVdi = suspendVdi
} else {
newVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
newVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => newVdi.$destroy())
}
@@ -252,19 +311,13 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
// Import VDI contents.
cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
if (stream === null) {
// we restore a backup and completely reuse a local snapshot
continue
}
if (typeof stream === 'function') {
stream = await stream()
}
if (stream.length === undefined) {
stream = await createVhdStreamWithLength(stream)
}
await xapi.setField('VDI', vdi.$ref, 'name_label', `[Importing] ${vdiRecords[id].name_label}`)
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
await xapi.setField('VDI', vdi.$ref, 'name_label', vdiRecords[id].name_label)
}
}),

View File

@@ -17,6 +17,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
dedup: false,
diskPerVmConcurrency: 0, // not limited by default
exportRetention: 0,
fullInterval: 0,

View File

@@ -22,13 +22,7 @@ export class XoMetadataBackup {
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
const data = job.xoMetadata
let dataBaseName = './data'
// JSON data is sent as a plain string, binary data is sent as an object with `data` and `encoding` properties
const isJson = typeof data === 'string'
if (isJson) {
dataBaseName += '.json'
}
const dataBaseName = './data.json'
const metadata = JSON.stringify(
{
@@ -60,7 +54,7 @@ export class XoMetadataBackup {
async () => {
const handler = adapter.handler
const dirMode = this._config.dirMode
await handler.outputFile(dataFileName, isJson ? data : Buffer.from(data.data, data.encoding), { dirMode })
await handler.outputFile(dataFileName, data, { dirMode })
await handler.outputFile(metaDataFileName, metadata, {
dirMode,
})

View File

@@ -29,8 +29,6 @@ export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote
writer =>
writer.run({
stream: forkStreamUnpipe(stream),
// the stream will be forked and transformed, it's not safe to attach additional properties to it
streamLength: stream.length,
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,

View File

@@ -35,25 +35,13 @@ export const FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
useSnapshot: false,
})
)
const vdis = await exportedVm.$getDisks()
let maxStreamLength = 1024 * 1024 // Ovf file and tar headers are a few KB, let's stay safe
for (const vdiRef of vdis) {
const vdi = await this._xapi.getRecord('VDI', vdiRef)
// the size of a fully allocated VDI will be exactly virtual_size; in general this is a gross overestimation
// of the real stream size, since a disk is never completely full
// vdi.physical_size seems to greatly underestimate the real disk usage of a VDI, as of 2023-10-30
maxStreamLength += vdi.virtual_size
}
const sizeContainer = watchStreamSize(stream)
const timestamp = Date.now()
await this._callWriters(
writer =>
writer.run({
maxStreamLength,
sizeContainer,
stream: forkStreamUnpipe(stream),
timestamp,

View File

@@ -2,7 +2,6 @@ import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import * as UUID from 'uuid'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'
@@ -10,48 +9,11 @@ import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'
import { Disposable } from 'promise-toolbox'
import { openVhd } from 'vhd-lib'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
class IncrementalRemoteVmBackupRunner extends AbstractRemote {
_getRemoteWriter() {
return IncrementalRemoteWriter
}
async _selectBaseVm(metadata) {
// for each disk , get the parent
const baseUuidToSrcVdi = new Map()
// no previous backup for a base( =key) backup
if (metadata.isBase) {
return
}
await asyncEach(Object.entries(metadata.vdis), async ([id, vdi]) => {
const isDifferencing = metadata.isVhdDifferencing[`${id}.vhd`]
if (isDifferencing) {
const vmDir = getVmBackupDir(metadata.vm.uuid)
const path = `${vmDir}/${metadata.vhds[id]}`
// don't catch errors: we can't recover if the source VHDs are missing
await Disposable.use(openVhd(this._sourceRemoteAdapter._handler, path), vhd => {
baseUuidToSrcVdi.set(UUID.stringify(vhd.header.parentUuid), vdi.$snapshot_of$uuid)
})
}
})
const presentBaseVdis = new Map(baseUuidToSrcVdi)
await this._callWriters(
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis),
'writer.checkBaseVdis()',
false
)
// check if the parent vdi are present in all the remotes
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
throw new Error(`Missing vdi ${baseUuid} which is a base for a delta`)
}
})
// yeah , let's go
}
async _run($defer) {
const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
await this._callWriters(async writer => {
@@ -64,16 +26,16 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
if (transferList.length > 0) {
for (const metadata of transferList) {
assert.strictEqual(metadata.mode, 'delta')
await this._selectBaseVm(metadata)
await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
useChain: false,
})
const isVhdDifferencing = {}
const differentialVhds = {}
await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
differentialVhds[key] = await isVhdDifferencingDisk(stream)
})
incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
@@ -81,24 +43,13 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
writer =>
writer.transfer({
deltaExport: forkDeltaExport(incrementalExport),
isVhdDifferencing,
differentialVhds,
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,
}),
'writer.transfer()'
)
// this will update parent name with the needed alias
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp: metadata.timestamp,
vdis: incrementalExport.vdis,
}),
'writer.updateUuidAndChain()'
)
await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
// for healthcheck
this._tags = metadata.vm.tags

View File

@@ -41,8 +41,6 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
fullVdisRequired,
nbdConcurrency: this._settings.nbdConcurrency,
preferNbd: this._settings.preferNbd,
})
// since NBD is network based, if one disk uses NBD, all the disks use it
// except the suspended VDI
@@ -50,11 +48,11 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
Task.info('Transfer data using NBD')
}
const isVhdDifferencing = {}
const differentialVhds = {}
// since isVhdDifferencingDisk is reading and unshifting data in stream
// it should be done BEFORE any other stream transform
await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
differentialVhds[key] = await isVhdDifferencingDisk(stream)
})
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
@@ -69,7 +67,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
writer =>
writer.transfer({
deltaExport: forkDeltaExport(deltaExport),
isVhdDifferencing,
differentialVhds,
sizeContainers,
timestamp,
vm,
@@ -78,18 +76,6 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
'writer.transfer()'
)
// we want to control the uuid of the vhd in the chain
// and ensure they are correctly chained
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp,
vdis: deltaExport.vdis,
}),
'writer.updateUuidAndChain()'
)
this._baseVm = exportedVm
if (baseVm !== undefined) {
@@ -145,7 +131,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
])
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(baseUuid, srcVdi.uuid)
baseUuidToSrcVdi.set(baseUuid, srcVdi)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
@@ -166,18 +152,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdiUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdiUuid,
vdi: srcVdi.uuid,
})
fullVdisRequired.add(srcVdiUuid)
fullVdisRequired.add(srcVdi.uuid)
}
})

View File

@@ -4,7 +4,6 @@ import { Disposable } from 'promise-toolbox'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
import { Abstract } from './_Abstract.mjs'
import { extractIdsFromSimplePattern } from '../../extractIdsFromSimplePattern.mjs'
export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
constructor({
@@ -35,8 +34,7 @@ export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstrac
this._writers = writers
const RemoteWriter = this._getRemoteWriter()
extractIdsFromSimplePattern(job.remotes).forEach(remoteId => {
const adapter = remoteAdapters[remoteId]
Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
const targetSettings = {
...settings,
...allSettings[remoteId],

View File

@@ -31,11 +31,6 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
throw new Error('cannot backup a VM created by this very job')
}
const currentOperations = Object.values(vm.current_operations)
if (currentOperations.some(_ => _ === 'migrate_send' || _ === 'pool_migrate')) {
throw new Error('cannot backup a VM currently being migrated')
}
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
@@ -261,15 +256,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
}
if (this._writers.size !== 0) {
const { pool_migrate = null, migrate_send = null } = this._exportedVm.blocked_operations
const reason = 'VM migration is blocked during backup'
await this._exportedVm.update_blocked_operations({ pool_migrate: reason, migrate_send: reason })
try {
await this._copy()
} finally {
await this._exportedVm.update_blocked_operations({ pool_migrate, migrate_send })
}
await this._copy()
}
} finally {
if (startAfter) {

View File

@@ -1,11 +1,11 @@
import cloneDeep from 'lodash/cloneDeep.js'
import mapValues from 'lodash/mapValues.js'
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
export function forkDeltaExport(deltaExport) {
const { streams, ...rest } = deltaExport
const newMetadata = cloneDeep(rest)
newMetadata.streams = mapValues(streams, forkStreamUnpipe)
return newMetadata
return Object.create(deltaExport, {
streams: {
value: mapValues(deltaExport.streams, forkStreamUnpipe),
},
})
}

View File

@@ -24,7 +24,7 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
)
}
async _run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
async _run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
const settings = this._settings
const job = this._job
const scheduleId = this._scheduleId
@@ -65,8 +65,6 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
await Task.run({ name: 'transfer' }, async () => {
await adapter.outputStream(dataFilename, stream, {
maxStreamLength,
streamLength,
validator: tmpPath => adapter.isValidXva(tmpPath),
})
return { size: sizeContainer.size }

View File

@@ -1,12 +1,13 @@
import assert from 'node:assert'
import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, openVhd } from 'vhd-lib'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { dirname, basename } from 'node:path'
import { dirname } from 'node:path'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
@@ -21,45 +22,42 @@ import { Disposable } from 'promise-toolbox'
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
#parentVdiPaths
#vhds
async checkBaseVdis(baseUuidToSrcVdi) {
this.#parentVdiPaths = {}
const { handler } = this._adapter
const adapter = this._adapter
const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdiUuid]) => {
let parentDestPath
const vhdDir = `${vdisDir}/${srcVdiUuid}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
try {
const vhds = await handler.list(vhdDir, {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
// the last one is probably the right one
for (let i = vhds.length - 1; i >= 0 && parentDestPath === undefined; i--) {
const path = vhds[i]
await asyncMap(vhds, async path => {
try {
if (await adapter.isMergeableParent(packedBaseUuid, path)) {
parentDestPath = path
}
await checkVhdChain(handler, path)
// Warning, this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
//
// since all the path checks are done in parallel, found would contain
// only the last answer of isMergeableParent, which is probably not the right one
// this led to the support tickets https://help.vates.fr/#ticket/zoom/4751 , 4729, 4665 and 4300
const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
found = found || isMergeable
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
}
}
})
} catch (error) {
warn('checkBaseVdis', { error })
}
// no usable parent => the runner will have to decide whether to fall back to a full backup or stop
if (parentDestPath === undefined) {
if (!found) {
baseUuidToSrcVdi.delete(baseUuid)
} else {
this.#parentVdiPaths[vhdDir] = parentDestPath
}
})
}
@@ -124,44 +122,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
}
}
async updateUuidAndChain({ isVhdDifferencing, vdis }) {
assert.notStrictEqual(
this.#vhds,
undefined,
'_transfer must be called before updateUuidAndChain for incremental backups'
)
const parentVdiPaths = this.#parentVdiPaths
const { handler } = this._adapter
const vhds = this.#vhds
await asyncEach(Object.entries(vdis), async ([id, vdi]) => {
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
const path = `${this._vmBackupDir}/${vhds[id]}`
if (isDifferencing) {
assert.notStrictEqual(
parentVdiPaths,
'checkbasevdi must be called before updateUuidAndChain for incremental backups'
)
const parentPath = parentVdiPaths[dirname(path)]
// we are in an incremental backup
// we already computed the chain in checkBaseVdis
assert.notStrictEqual(parentPath, undefined, 'A differential VHD must have a parent')
// forbid any kind of loop
assert.ok(basename(parentPath) < basename(path), `vhd must be sorted to be chained`)
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD if needed
await Disposable.use(openVhd(handler, path), async vhd => {
if (!vhd.footer.uuid.equals(packUuid(vdi.uuid))) {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
}
})
})
}
async _deleteOldEntries() {
const adapter = this._adapter
const oldEntries = this._oldEntries
@@ -172,7 +132,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
}
}
async _transfer($defer, { isVhdDifferencing, timestamp, deltaExport, vm, vmSnapshot }) {
async _transfer($defer, { differentialVhds, timestamp, deltaExport, vm, vmSnapshot }) {
const adapter = this._adapter
const job = this._job
const scheduleId = this._scheduleId
@@ -180,10 +140,14 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
const jobId = job.id
const handler = adapter.handler
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// @todo: should skip the backup while being careful not to leave the forked stream stuck
Task.info('This backup has already been transfered')
}
const basename = formatFilenameDate(timestamp)
// update this.#vhds before potentially skipping the transfer, so that
// updateUuidAndChain has all the mandatory data
const vhds = (this.#vhds = mapValues(
const vhds = mapValues(
deltaExport.vdis,
vdi =>
`vdis/${jobId}/${
@@ -193,18 +157,10 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vdi.uuid
: vdi.$snapshot_of$uuid
}/${adapter.getVhdFileName(basename)}`
))
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// skip the backup while being careful not to leave the forked stream stuck
Task.info('This backup has already been transfered')
Object.values(deltaExport.streams).forEach(stream => stream.destroy())
return { size: 0 }
}
)
metadataContent = {
isVhdDifferencing,
dedup: settings.dedup,
jobId,
mode: job.mode,
scheduleId,
@@ -217,24 +173,57 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vm,
vmSnapshot,
}
const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await asyncEach(
Object.keys(deltaExport.vdis),
async id => {
Object.entries(deltaExport.vdis),
async ([id, vdi]) => {
const path = `${this._vmBackupDir}/${vhds[id]}`
// don't write it as transferSize += await asyncFunction()
// since i += await asyncFun() leads to a race condition
// as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
const transferSizeOneDisk = await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
const isDelta = differentialVhds[`${id}.vhd`]
let parentPath
if (isDelta) {
const vdiDir = dirname(path)
parentPath = (
await handler.list(vdiDir, {
filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
prependDir: true,
})
)
.sort()
.pop()
assert.notStrictEqual(
parentPath,
undefined,
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config['xo:base_delta']}`
)
parentPath = parentPath.slice(1) // remove leading slash
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
dedup: settings.dedup,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._config.writeBlockConcurrency,
})
transferSize += transferSizeOneDisk
if (isDelta) {
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD
await Disposable.use(openVhd(handler, path), async vhd => {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
},
{
concurrency: settings.diskPerVmConcurrency,

View File

@@ -1,21 +1,18 @@
import assert from 'node:assert'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { formatDateTime } from '@xen-orchestra/xapi'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { importIncrementalVm, TAG_BACKUP_SR, TAG_BASE_DELTA, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
import { importIncrementalVm, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
import { listReplicatedVms } from './_listReplicatedVms.mjs'
import find from 'lodash/find.js'
export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
assert.notStrictEqual(baseVm, undefined)
const sr = this._sr
const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
@@ -38,9 +35,7 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
}
}
}
updateUuidAndChain() {
// nothing to do, the chaining is not modified in this case
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -86,54 +81,6 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
}
#decorateVmMetadata(backup) {
const { _warmMigration } = this._settings
const sr = this._sr
const xapi = sr.$xapi
const vm = backup.vm
vm.other_config[TAG_COPY_SRC] = vm.uuid
const remoteBaseVmUuid = vm.other_config[TAG_BASE_DELTA]
let baseVm
if (remoteBaseVmUuid) {
baseVm = find(
xapi.objects.all,
obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
const baseVdis = {}
baseVm?.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
vm.other_config[TAG_COPY_SRC] = vm.uuid
if (!_warmMigration) {
vm.tags.push('Continuous Replication')
}
Object.values(backup.vdis).forEach(vdi => {
vdi.other_config[TAG_COPY_SRC] = vdi.uuid
vdi.SR = sr.$ref
// vdi.other_config[TAG_BASE_DELTA] is never defined on a suspend vdi
if (vdi.other_config[TAG_BASE_DELTA]) {
const remoteBaseVdiUuid = vdi.other_config[TAG_BASE_DELTA]
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
vdi.baseVdi = baseVdi
}
})
return backup
}
async _transfer({ timestamp, deltaExport, sizeContainers, vm }) {
const { _warmMigration } = this._settings
const sr = this._sr
@@ -144,7 +91,16 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
let targetVmRef
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await importIncrementalVm(this.#decorateVmMetadata(deltaExport), sr)
targetVmRef = await importIncrementalVm(
{
__proto__: deltaExport,
vm: {
...deltaExport.vm,
tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
},
},
sr
)
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
@@ -165,13 +121,13 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
)
),
targetVm.update_other_config({
[TAG_BACKUP_SR]: srUuid,
'xo:backup:sr': srUuid,
// these entries need to be added in case of offline backup
'xo:backup:datetime': formatDateTime(timestamp),
'xo:backup:job': job.id,
'xo:backup:schedule': scheduleId,
[TAG_BASE_DELTA]: vm.uuid,
'xo:backup:vm': vm.uuid,
}),
])
}

View File

@@ -1,9 +1,9 @@
import { AbstractWriter } from './_AbstractWriter.mjs'
export class AbstractFullWriter extends AbstractWriter {
async run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
async run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
try {
return await this._run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot })
return await this._run({ timestamp, sizeContainer, stream, vm, vmSnapshot })
} finally {
// ensure stream is properly closed
stream.destroy()

View File

@@ -5,10 +5,6 @@ export class AbstractIncrementalWriter extends AbstractWriter {
throw new Error('Not implemented')
}
updateUuidAndChain() {
throw new Error('Not implemented')
}
cleanup() {
throw new Error('Not implemented')
}

View File

@@ -96,9 +96,6 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
metadata,
srUuid,
xapi,
settings: {
additionnalVmTag: 'xo:no-bak=Health Check',
},
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
@@ -113,13 +110,13 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
)
}
async _isAlreadyTransferred(timestamp) {
_isAlreadyTransferred(timestamp) {
const vmUuid = this._vmUuid
const adapter = this._adapter
const backupDir = getVmBackupDir(vmUuid)
try {
const actualMetadata = JSON.parse(
await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
)
return actualMetadata
} catch (error) {}

View File

@@ -18,7 +18,7 @@ export const MixinXapiWriter = (BaseClass = Object) =>
const vdiRefs = await xapi.VM_getDisks(baseVm.$ref)
for (const vdiRef of vdiRefs) {
const vdi = xapi.getObject(vdiRef)
if (vdi.$SR.uuid !== this._healthCheckSr.uuid) {
if (vdi.$SR.uuid !== this._heathCheckSr.uuid) {
return false
}
}
@@ -58,7 +58,7 @@ export const MixinXapiWriter = (BaseClass = Object) =>
)
}
const healthCheckVm = xapi.getObject(healthCheckVmRef) ?? (await xapi.waitObject(healthCheckVmRef))
await healthCheckVm.add_tags('xo:no-bak=Health Check')
await new HealthCheckVmBackup({
restoredVm: healthCheckVm,
xapi,

View File

@@ -45,6 +45,34 @@ When `useVhdDirectory` is enabled on the remote, the directory containing the VH
└─ <uuid>.vhd
```
#### vhd directory with deduplication
The difference with the non-dedup mode is that a hash is computed for each VHD block. The hash is split into 4-character tokens and the data is stored in `xo-block-store/{token1}/.../{token7}/{token8}.source`.
A hard link is then made from this source to the destination folder in `<vdis>/<job UUID>/<VDI UUID>/blocks/{number}/{number}`.
```
<remote>
└─ xo-block-store
└─ {4 char}
└─ ...
└─ {char.source}
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ cache.json.gz
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
│ ├─ index.json // TODO
│ ├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
| └─ data
| ├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
├─ <YYYYMMDD>T<HHmmss>.json // backup metadata
├─ <YYYYMMDD>T<HHmmss>.xva
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
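For illustration, here is a minimal sketch of how such a store path can be derived from a block's content hash. The helper name `computeDedupPath` and the use of sha256 are assumptions of this sketch (the number of path levels simply follows from the digest length); the actual implementation lives in `@xen-orchestra/fs`.
```js
// Illustrative only: derive the store path of a block from its content hash.
import { createHash } from 'node:crypto'

function computeDedupPath(blockData) {
  const hash = createHash('sha256').update(blockData).digest('hex')
  const tokens = hash.match(/.{4}/g) // e.g. 64 hex chars -> 16 tokens of 4 chars
  return `xo-block-store/${tokens.slice(0, -1).join('/')}/${tokens[tokens.length - 1]}.source`
}
// computeDedupPath(block) -> 'xo-block-store/<token1>/…/<tokenN>.source'
```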
## Cache for a VM
In a VM directory, if the file `cache.json.gz` exists, it contains the metadata for all the backups for this VM.
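For illustration, a minimal sketch of reading this cache on a local remote (file name and location as described above; the exact shape of the parsed object is not detailed here):
```js
// Illustrative only: read the per-VM backup cache described above.
import { readFile } from 'node:fs/promises'
import { gunzipSync } from 'node:zlib'

async function readVmBackupCache(vmDir) {
  try {
    const gz = await readFile(`${vmDir}/cache.json.gz`)
    return JSON.parse(gunzipSync(gz).toString('utf8'))
  } catch (error) {
    if (error.code === 'ENOENT') {
      return undefined // no cache yet, it can be regenerated from the metadata files
    }
    throw error
  }
}
```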
@@ -221,7 +249,7 @@ For multiple objects:
### Settings
Settings are described in [`@xen-orchestra/backups/_runners/VmsXapi.mjs`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/_runners/VmsXapi.mjs).
Settings are described in [`@xen-orchestra/backups/Backup.js`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/Backup.js).
## Writer API
@@ -230,7 +258,6 @@ Settings are described in [`@xen-orchestra/backups/\_runners/VmsXapi.mjs``](http
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `updateUuidAndChain({ isVhdDifferencing, vdis })`
- `cleanup()`
- `healthCheck()` // is not executed if no health check sr or tag doesn't match
- **Full**

View File

@@ -0,0 +1,23 @@
# Deduplication
- This uses an additional inode (or equivalent on the FS) for each different block in the `xo-block-store` subfolder
- This will not work well with immutability/object lock
- Only blocks of VHD directories are deduplicated
- Prerequisites: the FS must support hard links and extended attributes
- A key (full) backup does not take more space on the remote than a delta. It will take more inodes, and more time, since all the blocks have to be read.
When a new block is written to the remote, a hash is computed. If a file with this hash doesn't exist in `xo-block-store`, it is created, then the hash is added as an extended attribute.
A hard link, sharing data and extended attributes, is then created to the destination.
When deleting a block which has a hash extended attribute, a check is done on `xo-block-store`: if there is no other link, the block is deleted. The directory containing it stays.
When merging blocks, the unlink method is called before overwriting an existing block (see the sketch below).
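A minimal sketch of that write path, assuming a plain local FS; the real implementation is `LocalHandler#_writeFile` with `dedup: true` (shown further down in this diff), which additionally stores the hash as an extended attribute for use at unlink time:
```js
// Illustrative only: write a block through the dedup store with a hard link.
import { link, mkdir, writeFile } from 'node:fs/promises'
import { dirname } from 'node:path'

async function writeDeduplicatedBlock(storePath, destPath, data) {
  await mkdir(dirname(storePath), { recursive: true })
  try {
    // create the source block only if this content has never been seen before
    await writeFile(storePath, data, { flag: 'wx' })
  } catch (error) {
    if (error.code !== 'EEXIST') {
      throw error
    }
  }
  // the destination shares data (and extended attributes) with the source
  await link(storePath, destPath)
}
```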
### Troubleshooting
Since all the blocks are hard linked, you can convert a deduplicated remote to a non-deduplicated one by deleting the `xo-block-store` directory.
Two new methods have been added to the local FS handler:
- `deduplicationGarbageCollector()`, which should be called from the root of the FS: it cleans up any block without other links, and any empty directory
- `deduplicationStats()`, which computes the number of blocks in the store and how many times they are used
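For illustration, they could be invoked from a maintenance script like this; the handler creation mirrors the merge worker, the remote URL is a placeholder, and the return shape of `deduplicationStats()` is an assumption (the doc only describes what it computes):
```js
// Illustrative only: run the maintenance helpers from the root of a local remote.
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable } from 'promise-toolbox'

await Disposable.use(getSyncedHandler({ url: 'file:///mnt/backups' }), async handler => {
  // removes blocks that no longer have any other hard link, and empty directories
  await handler.deduplicationGarbageCollector()
  // number of blocks in the store and how many times they are used (shape assumed)
  console.log(await handler.deduplicationStats())
})
```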

View File

@@ -2,21 +2,6 @@ import mapValues from 'lodash/mapValues.js'
import { dirname } from 'node:path'
function formatVmBackup(backup) {
const { isVhdDifferencing, vmSnapshot } = backup
let differencingVhds
let dynamicVhds
// some backups don't use snapshots and therefore cannot include memory
const withMemory = vmSnapshot !== undefined && vmSnapshot.suspend_VDI !== 'OpaqueRef:NULL'
// isVhdDifferencing is either undefined or an object
if (isVhdDifferencing !== undefined) {
differencingVhds = Object.values(isVhdDifferencing).filter(t => t).length
dynamicVhds = Object.values(isVhdDifferencing).filter(t => !t).length
if (withMemory) {
// the suspend VDI (memory) is always a dynamic VHD
dynamicVhds -= 1
}
}
return {
disks:
backup.vhds === undefined
@@ -31,7 +16,7 @@ function formatVmBackup(backup) {
}),
id: backup.id,
isImmutable: backup.isImmutable,
dedup: backup.dedup,
jobId: backup.jobId,
mode: backup.mode,
scheduleId: backup.scheduleId,
@@ -41,10 +26,6 @@ function formatVmBackup(backup) {
name_description: backup.vm.name_description,
name_label: backup.vm.name_label,
},
differencingVhds,
dynamicVhds,
withMemory,
}
}

View File

@@ -2,21 +2,18 @@
// eslint-disable-next-line eslint-comments/disable-enable-pair
/* eslint-disable n/shebang */
import { asyncEach } from '@vates/async-each'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'
import { createLogger } from '@xen-orchestra/log'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { join } from 'node:path'
import { load as loadConfig } from 'app-conf'
import Disposable from 'promise-toolbox/Disposable'
import min from 'lodash/min.js'
import { getVmBackupDir } from '../_getVmBackupDir.mjs'
import { RemoteAdapter } from '../RemoteAdapter.mjs'
import { CLEAN_VM_QUEUE } from './index.mjs'
const APP_NAME = 'xo-merge-worker'
const APP_DIR = new URL('.', import.meta.url).pathname
// -------------------------------------------------------------------
catchGlobalErrors(createLogger('xo:backups:mergeWorker'))
@@ -37,7 +34,6 @@ const main = Disposable.wrap(async function* main(args) {
for (let i = 0; i < 10; ++i) {
const entries = await handler.list(CLEAN_VM_QUEUE)
if (entries.length !== 0) {
entries.sort()
return entries
}
await new Promise(timeoutResolver)
@@ -46,47 +42,38 @@ const main = Disposable.wrap(async function* main(args) {
let taskFiles
while ((taskFiles = await listRetry()) !== undefined) {
const { concurrency } = await loadConfig(APP_NAME, {
appDir: APP_DIR,
ignoreUnknownFormats: true,
})
await asyncEach(
taskFiles,
async taskFileBasename => {
const previousTaskFile = join(CLEAN_VM_QUEUE, taskFileBasename)
const taskFile = join(CLEAN_VM_QUEUE, '_' + taskFileBasename)
const taskFileBasename = min(taskFiles)
const previousTaskFile = join(CLEAN_VM_QUEUE, taskFileBasename)
const taskFile = join(CLEAN_VM_QUEUE, '_' + taskFileBasename)
// move this task to the end
try {
await handler.rename(previousTaskFile, taskFile)
} catch (error) {
// this error occurs if the task failed too many times (i.e. too many `_` prefixes)
// there is nothing more that can be done
if (error.code === 'ENAMETOOLONG') {
await handler.unlink(previousTaskFile)
}
// move this task to the end
try {
await handler.rename(previousTaskFile, taskFile)
} catch (error) {
// this error occurs if the task failed too many times (i.e. too many `_` prefixes)
// there is nothing more that can be done
if (error.code === 'ENAMETOOLONG') {
await handler.unlink(previousTaskFile)
}
throw error
}
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {
throw error
}
}
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {
throw error
}
}
handler.unlink(taskFile).catch(error => warn('deleting task failure', { error }))
} catch (error) {
warn('failure handling task', { error })
}
},
{ concurrency }
)
handler.unlink(taskFile).catch(error => warn('deleting task failure', { error }))
} catch (error) {
warn('failure handling task', { error })
}
}
})

View File

@@ -1 +0,0 @@
concurrency = 1

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.44.6",
"version": "0.39.0",
"engines": {
"node": ">=14.18"
},
@@ -17,23 +17,21 @@
"test-integration": "node--test *.integ.mjs"
},
"dependencies": {
"@iarna/toml": "^2.2.5",
"@kldzj/stream-throttle": "^1.1.1",
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.1.0",
"@vates/disposable": "^0.1.5",
"@vates/fuse-vhd": "^2.1.0",
"@vates/nbd-client": "^3.0.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.4",
"@vates/fuse-vhd": "^1.0.0",
"@vates/nbd-client": "^1.2.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^4.1.4",
"@xen-orchestra/fs": "^4.0.1",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/template": "^0.1.0",
"app-conf": "^2.3.0",
"compare-versions": "^6.0.0",
"d3-time-format": "^4.1.0",
"compare-versions": "^5.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"golike-defer": "^0.5.1",
"limit-concurrency-decorator": "^0.5.0",
@@ -42,21 +40,20 @@
"parse-pairs": "^2.0.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"tar": "^6.1.15",
"uuid": "^9.0.0",
"vhd-lib": "^4.9.0",
"xen-api": "^2.0.1",
"vhd-lib": "^4.5.0",
"xen-api": "^1.3.3",
"yazl": "^2.5.1"
},
"devDependencies": {
"fs-extra": "^11.1.0",
"rimraf": "^5.0.1",
"sinon": "^17.0.1",
"sinon": "^15.0.1",
"test": "^3.2.1",
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^4.2.0"
"@xen-orchestra/xapi": "^2.2.1"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -1,10 +1,11 @@
#!/usr/bin/env node
import { defer } from 'golike-defer'
import { readFileSync } from 'fs'
import { Ref, Xapi } from 'xen-api'
'use strict'
const pkg = JSON.parse(readFileSync(new URL('./package.json', import.meta.url)))
const { Ref, Xapi } = require('xen-api')
const { defer } = require('golike-defer')
const pkg = require('./package.json')
Xapi.prototype.getVmDisks = async function (vm) {
const disks = { __proto__: null }

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/cr-seed-cli",
"version": "1.0.0",
"version": "0.2.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cr-seed-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -10,15 +10,15 @@
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=10"
"node": ">=8"
},
"bin": {
"xo-cr-seed": "./index.mjs"
"xo-cr-seed": "./index.js"
},
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^2.0.1"
"xen-api": "^1.3.3"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -42,7 +42,7 @@
"test": "node--test"
},
"devDependencies": {
"sinon": "^17.0.1",
"sinon": "^15.0.1",
"test": "^3.2.1"
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "4.1.4",
"version": "4.0.1",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -28,12 +28,13 @@
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.1.0",
"@vates/read-chunk": "^1.2.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.1.1",
"@xen-orchestra/log": "^0.6.0",
"bind-property-descriptor": "^2.0.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",
"fs-extended-attributes": "^1.0.1",
"fs-extra": "^11.1.0",
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.5.0",
@@ -53,7 +54,7 @@
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
"rimraf": "^5.0.1",
"sinon": "^17.0.1",
"sinon": "^15.0.4",
"test": "^3.3.0",
"tmp": "^0.2.1"
},

View File

@@ -189,7 +189,7 @@ export default class RemoteHandlerAbstract {
* @param {number} [options.dirMode]
* @param {(this: RemoteHandlerAbstract, path: string) => Promise<undefined>} [options.validator] Function that will be called before the data is committed to the remote; if it fails, the file should not exist
*/
async outputStream(path, input, { checksum = true, dirMode, maxStreamLength, streamLength, validator } = {}) {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
@@ -201,8 +201,6 @@ export default class RemoteHandlerAbstract {
}
await this._outputStream(path, input, {
dirMode,
maxStreamLength,
streamLength,
validator,
})
if (checksum) {
@@ -270,9 +268,9 @@ export default class RemoteHandlerAbstract {
await this._mktree(normalizePath(dir), { mode })
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
async outputFile(file, data, { dedup = false, dirMode, flags = 'wx' } = {}) {
const encryptedData = this.#encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
await this._outputFile(normalizePath(file), encryptedData, { dedup, dirMode, flags })
}
async read(file, buffer, position) {
@@ -321,8 +319,8 @@ export default class RemoteHandlerAbstract {
await timeout.call(this._rmdir(normalizePath(dir)).catch(ignoreEnoent), this._timeout)
}
async rmtree(dir) {
await this._rmtree(normalizePath(dir))
async rmtree(dir, { dedup } = {}) {
await this._rmtree(normalizePath(dir), { dedup })
}
// Asks the handler to sync the state of the effective remote with its'
@@ -364,7 +362,7 @@ export default class RemoteHandlerAbstract {
let data
try {
// this file is not encrypted
data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME), 'utf-8')
const json = JSON.parse(data)
encryptionAlgorithm = json.algorithm
} catch (error) {
@@ -377,7 +375,7 @@ export default class RemoteHandlerAbstract {
try {
this.#rawEncryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
// this file is encrypted
const data = await this.__readFile(ENCRYPTION_METADATA_FILENAME)
const data = await this.__readFile(ENCRYPTION_METADATA_FILENAME, 'utf-8')
JSON.parse(data)
} catch (error) {
// can be ENOENT, bad algorithm, or broken JSON (bad key or algorithm)
@@ -399,6 +397,10 @@ export default class RemoteHandlerAbstract {
}
}
async checkSupport() {
return {}
}
async test() {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
@@ -439,14 +441,14 @@ export default class RemoteHandlerAbstract {
await this._truncate(file, len)
}
async __unlink(file, { checksum = true } = {}) {
async __unlink(file, { checksum = true, dedup = false } = {}) {
file = normalizePath(file)
if (checksum) {
ignoreErrors.call(this._unlink(checksumFile(file)))
}
await this._unlink(file).catch(ignoreEnoent)
await this._unlink(file, { dedup }).catch(ignoreEnoent)
}
async write(file, buffer, position) {
@@ -562,17 +564,16 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async _outputFile(file, data, { dirMode, flags }) {
async _outputFile(file, data, { dirMode, flags, dedup = false }) {
try {
return await this._writeFile(file, data, { flags })
return await this._writeFile(file, data, { dedup, flags })
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
await this._mktree(dirname(file), { mode: dirMode })
return this._outputFile(file, data, { flags })
return this._outputFile(file, data, { dedup, flags })
}
async _outputStream(path, input, { dirMode, validator }) {
@@ -615,7 +616,7 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async _rmtree(dir) {
async _rmtree(dir, { dedup } = {}) {
try {
return await this._rmdir(dir)
} catch (error) {
@@ -626,18 +627,14 @@ export default class RemoteHandlerAbstract {
const files = await this._list(dir)
await asyncEach(files, file =>
this._unlink(`${dir}/${file}`).catch(
error => {
// Unlink dir behavior is not consistent across platforms
// https://github.com/nodejs/node-v0.x-archive/issues/5791
if (error.code === 'EISDIR' || error.code === 'EPERM') {
return this._rmtree(`${dir}/${file}`)
}
throw error
},
// real unlink concurrency will be 2**max directory depth
{ concurrency: 2 }
)
this._unlink(`${dir}/${file}`, { dedup }).catch(error => {
// Unlink dir behavior is not consistent across platforms
// https://github.com/nodejs/node-v0.x-archive/issues/5791
if (error.code === 'EISDIR' || error.code === 'EPERM') {
return this._rmtree(`${dir}/${file}`)
}
throw error
})
)
return this._rmtree(dir)
}
@@ -645,7 +642,7 @@ export default class RemoteHandlerAbstract {
// called to initialize the remote
async _sync() {}
async _unlink(file) {
async _unlink(file, opts) {
throw new Error('Not implemented')
}

View File

@@ -209,7 +209,7 @@ describe('encryption', () => {
// encrypt with a non default algorithm
const encryptor = _getEncryptor('aes-256-cbc', '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "aes-256-gcm"}`)
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "aes-256-gmc"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
// remote is now non empty : can't modify key anymore

View File

@@ -19,7 +19,8 @@ try {
} catch (_) {}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]
const { type } = parse(remote.url)
const Handler = HANDLERS[type]
if (!Handler) {
throw new Error('Unhandled remote type')
}

View File

@@ -1,10 +1,17 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
// import fsx from 'fs-extended-attributes'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import { asyncEach } from '@vates/async-each'
import { fromEvent, fromCallback, ignoreErrors, retry } from 'promise-toolbox'
import { synchronized } from 'decorator-synchronized'
import RemoteHandlerAbstract from './abstract'
import { normalize as normalizePath } from './path'
import assert from 'node:assert'
import { createHash, randomBytes } from 'node:crypto'
const { info, warn } = createLogger('xo:fs:local')
@@ -37,6 +44,10 @@ export default class LocalHandler extends RemoteHandlerAbstract {
#addSyncStackTrace
#retriesOnEagain
#supportDedup
#dedupDirectory = '/xo-block-store'
#hashMethod = 'sha256'
#attributeKey = `user.hash.${this.#hashMethod}`
constructor(remote, opts = {}) {
super(remote)
@@ -171,12 +182,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
}
async _readFile(file, { flags, ...options } = {}) {
// contrary to createReadStream, readFile expect singular `flag`
if (flags !== undefined) {
options.flag = flags
}
async _readFile(file, options) {
const filePath = this.getFilePath(file)
return await this.#addSyncStackTrace(retry, () => fs.readFile(filePath, options), this.#retriesOnEagain)
}
@@ -199,16 +205,267 @@ export default class LocalHandler extends RemoteHandlerAbstract {
return this.#addSyncStackTrace(fs.truncate, this.getFilePath(file), len)
}
async _unlink(file) {
const filePath = this.getFilePath(file)
async #localUnlink(filePath) {
return await this.#addSyncStackTrace(retry, () => fs.unlink(filePath), this.#retriesOnEagain)
}
async _unlink(file, { dedup } = {}) {
const filePath = this.getFilePath(file)
let hash
// only try to read dedup source if we try to delete something deduplicated
if (dedup === true) {
try {
// get hash before deleting the file
hash = await this.#getExtendedAttribute(file, this.#attributeKey)
} catch (err) {
// ignore the error: fall back to a normal delete
}
}
// delete file in place
await this.#localUnlink(filePath)
// implies we are on a deduplicated file
if (hash !== undefined) {
const dedupPath = this.getFilePath(this.#computeDeduplicationPath(hash))
await this.#removeExtendedAttribute(file, this.#attributeKey)
try {
const { nlink } = await fs.stat(dedupPath)
// get the number of copies still using this data
// delete source if it's alone
if (nlink === 1) {
await this.#localUnlink(dedupPath)
}
} catch (error) {
// not a problem if another process deleted the source, or if we unlinked the source file directly
if (error.code !== 'ENOENT') {
throw error
}
}
}
}
_writeFd(file, buffer, position) {
return this.#addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
#localWriteFile(file, data, { flags }) {
return this.#addSyncStackTrace(fs.writeFile, this.getFilePath(file), data, { flag: flags })
}
async _writeFile(file, data, { flags, dedup }) {
if (dedup === true) {
// only compute support once, and only if needed
if (this.#supportDedup === undefined) {
const supported = await this.checkSupport()
this.#supportDedup = supported.hardLink === true && supported.extendedAttributes === true
}
if (this.#supportDedup) {
const hash = this.#hash(data)
// create the file (if not already present) in the store
const dedupPath = await this.#writeDeduplicationSource(hash, data)
// hard link to the target place
// this linked file will have the same extended attributes
// (used for unlink)
return this.#link(dedupPath, file)
}
}
// fallback
return this.#localWriteFile(file, data, { flags })
}
#hash(data) {
return createHash(this.#hashMethod).update(data).digest('hex')
}
async #getExtendedAttribute(file, attributeName) {
try {
const value = await this._readFile(file + attributeName)
// _readFile returns a Buffer: convert it back to the stored utf-8 hash string
return value.toString('utf-8')
} catch (err) {
if (err.code === 'ENOENT') {
return
}
throw err
}
}
async #setExtendedAttribute(file, attributeName, value) {
return this._writeFile(file + attributeName, value, {})
}
async #removeExtendedAttribute(file, attributeName) {
return this._unlink(file + attributeName)
}
/*
async #getExtendedAttribute(file, attributeName) {
return new Promise((resolve, reject) => {
fsx.get(this.getFilePath(file), attributeName, (err, res) => {
if (err) {
reject(err)
} else {
// res is a buffer
// it is null if the file doesn't have this attribute
if (res !== null) {
resolve(res.toString('utf-8'))
}
resolve(undefined)
}
})
})
}
async #setExtendedAttribute(file, attributeName, value) {
return new Promise((resolve, reject) => {
fsx.set(this.getFilePath(file), attributeName, value, (err, res) => {
if (err) {
reject(err)
} else {
resolve(res)
}
})
})
}
async #removeExtendedAttribute(file, attributeName){
}
*/
// create a hard link between two files
#link(source, dest) {
return fs.link(this.getFilePath(source), this.getFilePath(dest))
}
// split the path to keep a sane number of files per directory
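// e.g. a sha256 hex digest '09a3cd9e…b635dd40' maps to '/xo-block-store/09a3/cd9e/…/b635/dd40.source'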
#computeDeduplicationPath(hash) {
assert.strictEqual(hash.length % 4, 0)
let path = this.#dedupDirectory
for (let i = 0; i < hash.length; i++) {
if (i % 4 === 0) {
path += '/'
}
path += hash[i]
}
path += '.source'
return path
}
async #writeDeduplicationSource(hash, data) {
const path = this.#computeDeduplicationPath(hash)
try {
// the 'wx' flag ensures this fails if the file already exists
// _outputFile will create the directory tree
await this._outputFile(path, data, { flags: 'wx' })
} catch (error) {
// if it is already present: not a problem
if (error.code === 'EEXIST') {
// it should already have the extended attributes, nothing more to do
return path
}
throw error
}
try {
await this.#setExtendedAttribute(path, this.#attributeKey, hash)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
// if a concurrent process deleted the dedup source: recreate it
return this.#writeDeduplicationSource(hash, data)
}
return path
}
/**
* delete empty dirs
* delete source files that don't have any more links
*
* @returns Promise
*/
async deduplicationGarbageCollector(dir = this.#dedupDirectory, alreadyVisited = false) {
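// strategy: try to rmdir outright; if not empty, recurse into subdirectories,
// unlink source files with no remaining hard links (nlink === 1), then retry once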
try {
await this._rmdir(dir)
return
} catch (error) {
if (error.code !== 'ENOTEMPTY') {
throw error
}
}
// the directory may not be empty after a first visit
if (alreadyVisited) {
return
}
const files = await this._list(dir)
await asyncEach(
files,
async file => {
const stat = await fs.stat(this.getFilePath(`${dir}/${file}`))
// have to check the stat to ensure we don't try to delete
// the directories: they don't have links
if (stat.isDirectory()) {
return this.deduplicationGarbageCollector(`${dir}/${file}`)
}
if (stat.nlink === 1) {
return fs.unlink(this.getFilePath(`${dir}/${file}`))
}
},
{ concurrency: 2 }
) // keep concurrency low since we do a recursive traversal of a deep tree
return this.deduplicationGarbageCollector(dir, true)
}
async deduplicationStats(dir = this.#dedupDirectory) {
let nbSourceBlocks = 0
let nbBlocks = 0
try {
const files = await this._list(dir)
await asyncEach(
files,
async file => {
const stat = await fs.stat(this.getFilePath(`${dir}/${file}`))
if (stat.isDirectory()) {
const { nbSourceBlocks: nbSourceInChild, nbBlocks: nbBlockInChild } = await this.deduplicationStats(
`${dir}/${file}`
)
nbSourceBlocks += nbSourceInChild
nbBlocks += nbBlockInChild
} else {
nbSourceBlocks++
nbBlocks += stat.nlink - 1 // ignore current
}
},
{ concurrency: 2 }
)
} catch (err) {
if (err.code !== 'ENOENT') {
throw err
}
}
return { nbSourceBlocks, nbBlocks }
}
@synchronized()
async checkSupport() {
const supported = await super.checkSupport()
const sourceFileName = normalizePath(`${Date.now()}.sourcededup`)
const destFileName = normalizePath(`${Date.now()}.destdedup`)
try {
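// probe: write a random 1MiB file, store its hash, hard link it,
// then verify the link count, the data and the hash round-trip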
const SIZE = 1024 * 1024
const data = await fromCallback(randomBytes, SIZE)
const hash = this.#hash(data)
await this._outputFile(sourceFileName, data, { flags: 'wx', dedup: false })
await this.#setExtendedAttribute(sourceFileName, this.#attributeKey, hash)
await this.#link(sourceFileName, destFileName)
const linkedData = await this._readFile(destFileName)
const { nlink } = await fs.stat(this.getFilePath(destFileName))
// check it contains the right data and that the link counter was incremented
supported.hardLink = nlink === 2 && linkedData.equals(data)
supported.extendedAttributes = hash === (await this.#getExtendedAttribute(sourceFileName, this.#attributeKey))
} catch (error) {
warn(`error while testing the dedup`, { error })
} finally {
ignoreErrors.call(this._unlink(sourceFileName))
ignoreErrors.call(this._unlink(destFileName))
}
return supported
}
}

View File

@@ -0,0 +1,107 @@
import { after, beforeEach, describe, it } from 'node:test'
import assert from 'node:assert'
import fs from 'node:fs/promises'
import { getSyncedHandler } from './index.js'
import { Disposable, pFromCallback } from 'promise-toolbox'
import tmp from 'tmp'
import execa from 'execa'
import { rimraf } from 'rimraf'
import { randomBytes } from 'node:crypto'
// https://xkcd.com/221/
const data =
'H2GbLa0F2J4LHFLRwLP9zN4dGWJpdx1T6eGWra8BRlV9fBpRGtWIOSKXjU8y7fnxAWVGWpbYPYCwRigvxRSTcuaQsCtwvDNKMmFwYpsGMS14akgBD3EpOMPpKIRRySOsOeknpr48oopO1n9eq0PxGbOcY4Q9aojRu9rn1SMNyjq7YGzwVQEm6twA3etKGSYGvPJVTs2riXm7u6BhBh9VZtQDxQEy5ttkHiZUpgLi6QshSpMjL7dHco8k6gzGcxfpoyS5IzaQeXqDOeRjE6HNn27oUXpze5xRYolQhxA7IqdfzcYwWTqlaZb7UBUZoFCiFs5Y6vPlQVZ2Aw5YganLV1ZcIz78j6TAtXJAfXrDhksm9UteQul8RYT0Ur8AJRYgiGXOsXrWWBKm3CzZci6paLZ2jBmGfgVuBJHlvgFIjOHiVozjulGD4SwKQ2MNqUOylv89NTP1BsJuZ7MC6YCm5yix7FswoE7Y2NhDFqzEQvseRQFyz52AsfuqRY7NruKHlO7LOSI932che2WzxBAwy78Sk1eRHQLsZ37dLB4UkFFIq6TvyjJKznTMAcx9HDOSrFeke6KfsDB1A4W3BAxJk40oAcFMeM72Lg97sJExMJRz1m1nGQJEiGCcnll9G6PqEfHjoOhdDLgN2xewUyvbuRuKEXXxD1H6Tz1iWReyRGSagQNLXvqkKoHoxu3bvSi8nWrbtEY6K2eHLeF5bYubYGXc5VsfiCQNPEzQV4ECzaPdolRtbpRFMcB5aWK70Oew3HJkEcN7IkcXI9vlJKnFvFMqGOHKujd4Tyjhvru2UFh0dAkEwojNzz7W0XlASiXRneea9FgiJNLcrXNtBkvIgw6kRrgbXI6DPJdWDpm3fmWS8EpOICH3aTiXRLQUDZsReAaOsfau1FNtP4JKTQpG3b9rKkO5G7vZEWqTi69mtPGWmyOU47WL1ifJtlzGiFbZ30pcHMc0u4uopHwEQq6ZwM5S6NHvioxihhHQHO8JU2xvcjg5OcTEsXtMwIapD3re'
const hash = '09a3cd9e135114cb870a0b5cf0dfd3f4be994662d0c715b65bcfc5e3b635dd40'
const dataPath = 'xo-block-store/09a3/cd9e/1351/14cb/870a/0b5c/f0df/d3f4/be99/4662/d0c7/15b6/5bcf/c5e3/b635/dd40.source'
let dir
describe('dedup tests', () => {
beforeEach(async () => {
dir = await pFromCallback(cb => tmp.dir(cb))
})
after(async () => {
await rimraf(dir)
})
it('works in general case ', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
await handler.outputFile('in/a/sub/folder/file', data, { dedup: true })
await assert.doesNotReject(handler.list('xo-block-store'))
assert.strictEqual((await handler.list('xo-block-store')).length, 1)
assert.strictEqual((await handler.list('in/a/sub/folder')).length, 1)
assert.strictEqual((await handler.readFile('in/a/sub/folder/file')).toString('utf-8'), data)
const value = (await execa('getfattr', ['-n', 'user.hash.sha256', '--only-value', dir + '/in/a/sub/folder/file']))
.stdout
assert.strictEqual(value, hash)
// the source file is created
assert.strictEqual((await handler.readFile(dataPath)).toString('utf-8'), data)
await handler.outputFile('in/anotherfolder/file', data, { dedup: true })
assert.strictEqual((await handler.list('in/anotherfolder')).length, 1)
assert.strictEqual((await handler.readFile('in/anotherfolder/file')).toString('utf-8'), data)
await handler.unlink('in/a/sub/folder/file', { dedup: true })
// source is still here
assert.strictEqual((await handler.readFile(dataPath)).toString('utf-8'), data)
assert.strictEqual((await handler.readFile('in/anotherfolder/file')).toString('utf-8'), data)
await handler.unlink('in/anotherfolder/file', { dedup: true })
// source should have been deleted
assert.strictEqual(
(
await handler.list(
'xo-block-store/09a3/cd9e/1351/14cb/870a/0b5c/f0df/d3f4/be99/4662/d0c7/15b6/5bcf/c5e3/b635'
)
).length,
0
)
assert.strictEqual((await handler.list('in/anotherfolder')).length, 0)
})
})
it('garbage collector and stats', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
await handler.outputFile('in/anotherfolder/file', data, { dedup: true })
await handler.outputFile('in/anotherfolder/same', data, { dedup: true })
await handler.outputFile('in/a/sub/folder/file', randomBytes(1024), { dedup: true })
let stats = await handler.deduplicationStats()
assert.strictEqual(stats.nbBlocks, 3)
assert.strictEqual(stats.nbSourceBlocks, 2)
await fs.unlink(`${dir}/in/a/sub/folder/file`)
assert.strictEqual((await handler.list('xo-block-store')).length, 2)
await handler.deduplicationGarbageCollector()
stats = await handler.deduplicationStats()
assert.strictEqual(stats.nbBlocks, 2)
assert.strictEqual(stats.nbSourceBlocks, 1)
assert.strictEqual((await handler.list('xo-block-store')).length, 1)
})
})
it('compute support', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
const supported = await handler.checkSupport()
assert.strictEqual(supported.hardLink, true, 'support hard link is not present in local fs')
assert.strictEqual(supported.extendedAttributes, true, 'support extended attributes is not present in local fs')
})
})
it('handles edge cases : source deleted', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
await handler.outputFile('in/a/sub/folder/edge', data, { dedup: true })
await handler.unlink(dataPath, { dedup: true })
// no error if the source is already deleted
await assert.doesNotReject(() => handler.unlink('in/a/sub/folder/edge', { dedup: true }))
})
})
it('handles edge cases : non deduplicated file ', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
await handler.outputFile('in/a/sub/folder/edge', data, { dedup: false })
// no error when deleting a non-deduplicated file with the dedup flag
await assert.doesNotReject(() => handler.unlink('in/a/sub/folder/edge', { dedup: true }))
})
})
})

View File

@@ -20,7 +20,5 @@ export function split(path) {
return parts
}
// paths are made absolute otherwise fs.relative() would resolve them against working directory
export const relativeFromFile = (file, path) => relative(dirname(normalize(file)), normalize(path))
export const relativeFromFile = (file, path) => relative(dirname(file), path)
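// e.g. relativeFromFile('a/b/file.vhd', 'a/c/other.vhd') === '../c/other.vhd'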
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)

View File

@@ -1,17 +0,0 @@
import { describe, it } from 'test'
import { strict as assert } from 'assert'
import { relativeFromFile } from './path.js'
describe('relativeFromFile()', function () {
for (const [title, args] of Object.entries({
'file absolute and path absolute': ['/foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file relative and path absolute': ['foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file absolute and path relative': ['/foo/bar/file.vhd', 'foo/baz/path.vhd'],
'file relative and path relative': ['foo/bar/file.vhd', 'foo/baz/path.vhd'],
})) {
it('works with ' + title, function () {
assert.equal(relativeFromFile(...args), '../baz/path.vhd')
})
}
})

View File

@@ -5,7 +5,6 @@ import {
CreateMultipartUploadCommand,
DeleteObjectCommand,
GetObjectCommand,
GetObjectLockConfigurationCommand,
HeadObjectCommand,
ListObjectsV2Command,
PutObjectCommand,
@@ -17,23 +16,22 @@ import { NodeHttpHandler } from '@aws-sdk/node-http-handler'
import { getApplyMd5BodyChecksumPlugin } from '@aws-sdk/middleware-apply-body-checksum'
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import pRetry from 'promise-toolbox/retry'
import { createLogger } from '@xen-orchestra/log'
import { PassThrough, Transform, pipeline } from 'stream'
import { decorateWith } from '@vates/decorate-with'
import { PassThrough, pipeline } from 'stream'
import { parse } from 'xo-remote-parser'
import copyStreamToBuffer from './_copyStreamToBuffer.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './path'
import { asyncEach } from '@vates/async-each'
import { pRetry } from 'promise-toolbox'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PART_NUMBER = 10000
const MIN_PART_SIZE = 5 * 1024 * 1024
const { debug, info, warn } = createLogger('xo:fs:s3')
const { warn } = createLogger('xo:fs:s3')
export default class S3Handler extends RemoteHandlerAbstract {
#bucket
@@ -74,47 +72,12 @@ export default class S3Handler extends RemoteHandlerAbstract {
}),
})
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
const parts = split(path)
this.#bucket = parts.shift()
this.#dir = join(...parts)
const WITH_RETRY = [
'_closeFile',
'_copy',
'_getInfo',
'_getSize',
'_list',
'_mkdir',
'_openFile',
'_outputFile',
'_read',
'_readFile',
'_rename',
'_rmdir',
'_truncate',
'_unlink',
'_write',
'_writeFile',
]
WITH_RETRY.forEach(functionName => {
if (this[functionName] !== undefined) {
// adding the retry on the top level method won't
// cover the cases where _functionName is called internally
this[functionName] = pRetry.wrap(this[functionName], {
delays: [100, 200, 500, 1000, 2000],
// these errors should not change on retry
when: err => !['EEXIST', 'EISDIR', 'ENOTEMPTY', 'ENOENT', 'ENOTDIR', 'EISDIR'].includes(err?.code),
onRetry(error) {
warn('retrying method on fs ', {
method: functionName,
attemptNumber: this.attemptNumber,
delay: this.delay,
error,
file: this.arguments?.[0],
})
},
})
}
})
}
get type() {
@@ -223,35 +186,11 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
async _outputStream(path, input, { streamLength, maxStreamLength = streamLength, validator }) {
// S3 storage is limited to 10K parts, each part is limited to 5GB, and the total upload must be smaller than 5TB
// a bigger partSize increases the memory consumption of aws/lib-storage exponentially
let partSize
if (maxStreamLength === undefined) {
warn(`Writing ${path} to a S3 remote without a max size set will cut it to 50GB`, { path })
partSize = MIN_PART_SIZE // min size for S3
} else {
partSize = Math.min(Math.max(Math.ceil(maxStreamLength / MAX_PART_NUMBER), MIN_PART_SIZE), MAX_PART_SIZE)
}
// ensure we don't try to upload a stream too big for this partSize
let readCounter = 0
const MAX_SIZE = MAX_PART_NUMBER * partSize
const streamCutter = new Transform({
transform(chunk, encoding, callback) {
readCounter += chunk.length
if (readCounter > MAX_SIZE) {
callback(new Error(`read ${readCounter} bytes, maximum size allowed is ${MAX_SIZE} `))
} else {
callback(null, chunk)
}
},
})
async _outputStream(path, input, { validator }) {
// Workaround for "ReferenceError: ReadableStream is not defined"
// https://github.com/aws/aws-sdk-js-v3/issues/2522
const Body = new PassThrough()
pipeline(input, streamCutter, Body, () => {})
pipeline(input, Body, () => {})
const upload = new Upload({
client: this.#s3,
@@ -259,8 +198,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
...this.#createParams(path),
Body,
},
partSize,
leavePartsOnError: false,
})
await upload.done()
@@ -275,7 +212,27 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
// some object storage providers, like Backblaze, can answer with a 500/503 routinely
// in this case we should retry, and let their load balancing do its magic
// https://www.backblaze.com/b2/docs/calling.html#error_handling
@decorateWith(pRetry.wrap, {
delays: [100, 200, 500, 1000, 2000],
when: e => e.$metadata?.httpStatusCode === 500,
onRetry(error) {
warn('retrying writing file', {
attemptNumber: this.attemptNumber,
delay: this.delay,
error,
file: this.arguments[0],
})
},
})
async _writeFile(file, data, options) {
if (options?.dedup ?? false) {
throw new Error(
"S3 remotes don't support deduplication from XO, please use the deduplication of your S3 provider if any"
)
}
return this.#s3.send(
new PutObjectCommand({
...this.#createParams(file),
@@ -444,32 +401,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
async _closeFile(fd) {}
async _sync() {
await super._sync()
try {
// if Object Lock is enabled, each upload must come with a contentMD5 header
// the computation of this md5 is memory-intensive, especially when uploading a stream
const res = await this.#s3.send(new GetObjectLockConfigurationCommand({ Bucket: this.#bucket }))
if (res.ObjectLockConfiguration?.ObjectLockEnabled === 'Enabled') {
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
// will automatically add the contentMD5 header to any upload to S3
debug(`Object Lock is enabled, enabling content MD5 header`)
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
}
} catch (error) {
// maybe the account doesn't have enough privilege to query the object lock configuration
// be defensive and apply the md5 just in case
if (error.$metadata.httpStatusCode === 403) {
info(`s3 user doesn't have enough privilege to check for Object Lock, enabling content MD5 header`)
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
} else if (error.Code === 'ObjectLockConfigurationNotFoundError' || error.$metadata.httpStatusCode === 501) {
info(`Object lock is not available or not configured, don't add the content MD5 header`)
} else {
throw error
}
}
}
useVhdDirectory() {
return true
}

View File

@@ -1,10 +0,0 @@
### make a remote immutable
Launch the `xo-immutable-remote` command. The configuration is stored in the config file.
This script must be kept running to make files immutable reliably.
### make file mutable
Launch the `xo-lift-remote-immutability` cli. The configuration is stored in the config file.
If the config file has a `liftEvery` entry, this script will continue to run and check regularly whether there are files to update.

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,41 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @xen-orchestra/immutable-backups
[![Package Version](https://badgen.net/npm/v/@xen-orchestra/immutable-backups)](https://npmjs.org/package/@xen-orchestra/immutable-backups) ![License](https://badgen.net/npm/license/@xen-orchestra/immutable-backups) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@xen-orchestra/immutable-backups)](https://bundlephobia.com/result?p=@xen-orchestra/immutable-backups) [![Node compatibility](https://badgen.net/npm/node/@xen-orchestra/immutable-backups)](https://npmjs.org/package/@xen-orchestra/immutable-backups)
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/immutable-backups):
```sh
npm install --save @xen-orchestra/immutable-backups
```
## Usage
### make a remote immutable
Launch the `xo-immutable-remote` command. The configuration is stored in the config file.
This script must be kept running to make files immutable reliably.
### make file mutable
Launch the `xo-lift-remote-immutability` cli. The configuration is stored in the config file.
If the config file has a `liftEvery` entry, this script will continue to run and check regularly whether there are files to update.
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

View File

@@ -1,10 +0,0 @@
import fs from 'node:fs/promises'
import { dirname, join } from 'node:path'
import isBackupMetadata from './isBackupMetadata.mjs'
export default async path => {
if (isBackupMetadata(path)) {
// remove the vm metadata cache to force XO to rebuild it
await fs.unlink(join(dirname(path), 'cache.json.gz'))
}
}

View File

@@ -1,4 +0,0 @@
import { dirname } from 'node:path'
// check if we are handling a file directly under a vhd directory (bat, header, footer, ...)
export default path => dirname(path).endsWith('.vhd')

View File

@@ -1,46 +0,0 @@
import { load } from 'app-conf'
import { homedir } from 'os'
import { join } from 'node:path'
import ms from 'ms'
const APP_NAME = 'xo-immutable-backups'
const APP_DIR = new URL('.', import.meta.url).pathname
export default async function loadConfig() {
const config = await load(APP_NAME, {
appDir: APP_DIR,
ignoreUnknownFormats: true,
})
if (config.remotes === undefined || config.remotes?.length < 1) {
throw new Error(
'No remotes are configured in the config file, please add at least one [remotes.<remoteid>] with a root property pointing to the absolute path of the remote to watch'
)
}
if (config.liftEvery) {
config.liftEvery = ms(config.liftEvery)
}
for (const [remoteId, { indexPath, immutabilityDuration, root }] of Object.entries(config.remotes)) {
if (!root) {
throw new Error(
`Remote ${remoteId} doesn't have a root property containing the absolute path to the root of a backup repository`
)
}
if (!immutabilityDuration) {
throw new Error(
`Remote ${remoteId} doesn't have an immutabilityDuration property indicating the minimal duration the backups should be protected by immutability`
)
}
if (ms(immutabilityDuration) < ms('1d')) {
throw new Error(
`Remote ${remoteId} immutability duration is smaller than the minimum allowed (1d), current : ${immutabilityDuration}`
)
}
if (!indexPath) {
const basePath = indexPath ?? process.env.XDG_DATA_HOME ?? join(homedir(), '.local', 'share')
const immutabilityIndexPath = join(basePath, APP_NAME, remoteId)
config.remotes[remoteId].indexPath = immutabilityIndexPath
}
config.remotes[remoteId].immutabilityDuration = ms(immutabilityDuration)
}
return config
}

View File

@@ -1,14 +0,0 @@
# how often the lift-immutability script will run to check if
# some files need to be made mutable
liftEvery = 1h
# you can add as many remotes as you want, just change the id (here: remote1)
#[remotes.remote1]
#root = "/mnt/ssd/vhdblock/" # the absolute path of the root of the backup repository
#immutabilityDuration = 7d # mandatory
# optional, defaults to false; when true, the index is scanned and updated on start, which can be expensive
#rebuildIndexOnStart = true
# the index path is optional, default in XDG_DATA_HOME, or if this is not set, in ~/.local/share
#indexPath = "/var/lib/" # the application name (immutable-backup) will be appended automatically

View File

@@ -1,33 +0,0 @@
import { describe, it } from 'node:test'
import assert from 'node:assert/strict'
import fs from 'node:fs/promises'
import path from 'node:path'
import { tmpdir } from 'node:os'
import * as Directory from './directory.mjs'
import { rimraf } from 'rimraf'
describe('immutable-backups/file', async () => {
it('really lock a directory', async () => {
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
const dataDir = path.join(dir, 'data')
await fs.mkdir(dataDir)
const immutDir = path.join(dir, '.immutable')
const filePath = path.join(dataDir, 'test')
await fs.writeFile(filePath, 'data')
await Directory.makeImmutable(dataDir, immutDir)
assert.strictEqual(await Directory.isImmutable(dataDir), true)
await assert.rejects(() => fs.writeFile(filePath, 'data'))
await assert.rejects(() => fs.appendFile(filePath, 'data'))
await assert.rejects(() => fs.unlink(filePath))
await assert.rejects(() => fs.rename(filePath, filePath + 'copy'))
await assert.rejects(() => fs.writeFile(path.join(dataDir, 'test2'), 'data'))
await assert.rejects(() => fs.rename(dataDir, dataDir + 'copy'))
await Directory.liftImmutability(dataDir, immutDir)
assert.strictEqual(await Directory.isImmutable(dataDir), false)
await fs.writeFile(filePath, 'data')
await fs.appendFile(filePath, 'data')
await fs.unlink(filePath)
await fs.rename(dataDir, dataDir + 'copy')
await rimraf(dir)
})
})

View File

@@ -1,21 +0,0 @@
import execa from 'execa'
import { unindexFile, indexFile } from './fileIndex.mjs'
export async function makeImmutable(dirPath, immutabilityCachePath) {
if (immutabilityCachePath) {
await indexFile(dirPath, immutabilityCachePath)
}
await execa('chattr', ['+i', '-R', dirPath])
}
export async function liftImmutability(dirPath, immutabilityCachePath) {
if (immutabilityCachePath) {
await unindexFile(dirPath, immutabilityCachePath)
}
await execa('chattr', ['-i', '-R', dirPath])
}
export async function isImmutable(path) {
const { stdout } = await execa('lsattr', ['-d', path])
return stdout[4] === 'i'
}

View File

@@ -1,114 +0,0 @@
# Immutability
The goal is to make a remote that XO can write to, but not modify, during the immutability duration set on the remote. That way, it's not possible for XO to delete or encrypt any backup during this period. It protects your backups against ransomware, at least as long as the attacker does not have root access to the remote server.
We target `governance` type of immutability, **the local root account of the remote server will be able to lift immutability**.
We use the file system's capabilities; they are tested when the protection process starts.
It is compatible with encryption at rest made by XO.
## Prerequisites
The commands must be run as root on the remote, or by a user with the `CAP_LINUX_IMMUTABLE` capability. On start, the protect process writes its status and the immutability duration into the remote's `imutability.json` file.
The `chattr` and `lsattr` commands must be installed on the system.
## Configuring
This package uses app-conf to store its config. The application name is `xo-immutable-backup`. A sample config file is provided in this package.
## Making a file immutable
When marking a file or a folder immutable, an alias file is created in `<indexPath>/<DayOfFileCreation>/<sha256(fullpath)>`.
`indexPath` can be defined in the config file; otherwise `XDG_DATA_HOME` is used, and if that is not set, it falls back to `~/.local/share`.
This index is used when lifting the immutability of the remote: it only looks at the `<indexPath>/<DayOfFileCreation>/` folders that are old enough.
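A minimal sketch of how that index path is derived, paraphrasing the `computeIndexFilePath()` helper from `fileIndex.mjs` shown later in this diff (the `exampleIndexPath` name is only illustrative):
```js
import { join } from 'node:path'
import { createHash } from 'node:crypto'
import fs from 'node:fs/promises'
// illustrative only: mirrors computeIndexFilePath() from fileIndex.mjs
async function exampleIndexPath(filePath, indexPath) {
  const { birthtimeMs } = await fs.stat(filePath)
  const day = new Date(birthtimeMs).toISOString().split('T')[0] // <DayOfFileCreation>, e.g. '2023-07-24'
  const hash = createHash('sha256').update(filePath).digest('hex') // <sha256(fullpath)>
  return join(indexPath, day, hash)
}
```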
## Real time protecting
On start, the watcher will create the index if it does not exist.
It will also run a check to ensure immutability can work on this remote and handle the easiest issues.
The watching process depends on the backup type, since we don't want to make temporary files and caches immutable.
It won't protect files during upload, only when the files have been completely written to disk. Real time, in this case, means "protecting critical files as soon as possible after they are uploaded".
This can be alleviated by:
- Coupling immutability with encryption to ensure the file is not modified
- Making health checks to ensure the data matches the snapshot data exactly
List of protected files:
```js
const PATHS = [
// xo configuration backups
'xo-config-backups/*/*/data',
'xo-config-backups/*/*/data.json',
'xo-config-backups/*/*/metadata.json',
// pool backups
'xo-pool-metadata-backups/*/metadata.json',
'xo-pool-metadata-backups/*/data',
// vm backups , xo-vm-backups/<vmuuid>/
'xo-vm-backups/*/*.json',
'xo-vm-backups/*/*.xva',
'xo-vm-backups/*/*.xva.checksum',
// xo-vm-backups/<vmuuid>/vdis/<jobid>/<vdiUuid>
'xo-vm-backups/*/vdis/*/*/*.vhd', // can be an alias or a vhd file
// for vhd directory :
'xo-vm-backups/*/vdis/*/*/data/*.vhd/bat',
'xo-vm-backups/*/vdis/*/*/data/*.vhd/header',
'xo-vm-backups/*/vdis/*/*/data/*.vhd/footer',
]
```
## Releasing protection on old enough files on a remote
The watcher will periodically check if some files must be unlocked.
## Troubleshooting
### some files are still locked
add the `rebuildIndexOnStart` option to the config file
### make remote fully mutable again
- Update the immutability setting with a 0 duration
- launch the `liftProtection` cli.
- remove the `protectRemotes` service
### increasing the immutability duration
This will prolong the immutability of currently protected files, but won't protect files that are already out of immutability.
### reducing the immutability duration
Change the setting and launch the `liftProtection` cli, or wait for the next planned execution.
### why are my incremental backups not marked as protected in XO?
For incremental backups to be marked as protected in XO, the entire chain must be under protection. To ensure at least 7 days of backups are protected, you need to set the immutability duration and the retention to 14 days, and the full backup interval to 7 days.
That means that if the last backup chain is complete (7 backups) it is completely under protection, and if not, the preceding chain is also under protection. K are key (full) backups, d are deltas.
```
Kd Kdddddd Kdddddd K # 8 backups protected, 2 chains
K Kdddddd Kdddddd Kd # 9 backups protected, 2 chains
Kdddddd Kdddddd Kdd # 10 backups protected, 2 chains
Kddddd Kdddddd Kddd # 11 backups protected, 2 chains
Kdddd Kdddddd Kdddd # 12 backups protected, 2 chains
Kddd Kdddddd Kddddd # 13 backups protected, 2 chains
Kdd Kdddddd Kdddddd # 7 backups protected, 1 chain since precedent full is now mutable
Kd Kdddddd Kdddddd K # 8 backups protected, 2 chains
```
### Why doesn't the protect process start ?
- it should be run as root or by a user with the `CAP_LINUX_IMMUTABLE` capability
- the underlying file system should support immutability, especially the `chattr` and `lsattr` commands
- logs are in journalctl

View File

@@ -1,29 +0,0 @@
import { describe, it } from 'node:test'
import assert from 'node:assert/strict'
import fs from 'node:fs/promises'
import path from 'node:path'
import * as File from './file.mjs'
import { tmpdir } from 'node:os'
import { rimraf } from 'rimraf'
describe('immutable-backups/file', async () => {
it('really lock a file', async () => {
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
const immutDir = path.join(dir, '.immutable')
const filePath = path.join(dir, 'test.ext')
await fs.writeFile(filePath, 'data')
assert.strictEqual(await File.isImmutable(filePath), false)
await File.makeImmutable(filePath, immutDir)
assert.strictEqual(await File.isImmutable(filePath), true)
await assert.rejects(() => fs.writeFile(filePath, 'data'))
await assert.rejects(() => fs.appendFile(filePath, 'data'))
await assert.rejects(() => fs.unlink(filePath))
await assert.rejects(() => fs.rename(filePath, filePath + 'copy'))
await File.liftImmutability(filePath, immutDir)
assert.strictEqual(await File.isImmutable(filePath), false)
await fs.writeFile(filePath, 'data')
await fs.appendFile(filePath, 'data')
await fs.unlink(filePath)
await rimraf(dir)
})
})

View File

@@ -1,24 +0,0 @@
import execa from 'execa'
import { unindexFile, indexFile } from './fileIndex.mjs'
// this works only on Linux-like systems
// this could work on Windows: https://4sysops.com/archives/set-and-remove-the-read-only-file-attribute-with-powershell/
export async function makeImmutable(path, immutabilityCachePath) {
if (immutabilityCachePath) {
await indexFile(path, immutabilityCachePath)
}
await execa('chattr', ['+i', path])
}
export async function liftImmutability(filePath, immutabilityCachePath) {
if (immutabilityCachePath) {
await unindexFile(filePath, immutabilityCachePath)
}
await execa('chattr', ['-i', filePath])
}
export async function isImmutable(path) {
const { stdout } = await execa('lsattr', ['-d', path])
return stdout[4] === 'i'
}

View File

@@ -1,81 +0,0 @@
import { describe, it } from 'node:test'
import assert from 'node:assert/strict'
import fs from 'node:fs/promises'
import path from 'node:path'
import * as FileIndex from './fileIndex.mjs'
import * as Directory from './directory.mjs'
import { tmpdir } from 'node:os'
import { rimraf } from 'rimraf'
describe('immutable-backups/fileIndex', async () => {
it('index File changes', async () => {
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
const immutDir = path.join(dir, '.immutable')
const filePath = path.join(dir, 'test.ext')
await fs.writeFile(filePath, 'data')
await FileIndex.indexFile(filePath, immutDir)
await fs.mkdir(path.join(immutDir, 'NOTADATE'))
await fs.writeFile(path.join(immutDir, 'NOTADATE.file'), 'content')
let nb = 0
let index, target
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, 0)) {
assert.strictEqual(true, false, 'Nothing should be eligible for deletion')
}
nb = 0
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, -24 * 60 * 60 * 1000)) {
assert.strictEqual(target, filePath)
await fs.unlink(index)
nb++
}
assert.strictEqual(nb, 1)
await fs.rmdir(path.join(immutDir, 'NOTADATE'))
await fs.rm(path.join(immutDir, 'NOTADATE.file'))
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, -24 * 60 * 60 * 1000)) {
// should remove the empty dir
assert.strictEqual(true, false, 'Nothing should have stayed here')
}
assert.strictEqual((await fs.readdir(immutDir)).length, 0)
await rimraf(dir)
})
it('fails correctly', async () => {
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
const immutDir = path.join(dir, '.immutable')
await fs.mkdir(immutDir)
const placeholderFile = path.join(dir, 'test.ext')
await fs.writeFile(placeholderFile, 'data')
await FileIndex.indexFile(placeholderFile, immutDir)
const filePath = path.join(dir, 'test2.ext')
await fs.writeFile(filePath, 'data')
await FileIndex.indexFile(filePath, immutDir)
await assert.rejects(() => FileIndex.indexFile(filePath, immutDir), { code: 'EEXIST' })
await Directory.makeImmutable(immutDir)
await assert.rejects(() => FileIndex.unindexFile(filePath, immutDir), { code: 'EPERM' })
await Directory.liftImmutability(immutDir)
await rimraf(dir)
})
it('handles bomb index files', async () => {
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
const immutDir = path.join(dir, '.immutable')
await fs.mkdir(immutDir)
const placeholderFile = path.join(dir, 'test.ext')
await fs.writeFile(placeholderFile, 'data')
await FileIndex.indexFile(placeholderFile, immutDir)
const indexDayDir = path.join(immutDir, '1980,11-28')
await fs.mkdir(indexDayDir)
await fs.writeFile(path.join(indexDayDir, 'big'), Buffer.alloc(2 * 1024 * 1024))
await assert.rejects(async () => {
let index, target
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, 0)) {
// should remove the empty dir
assert.strictEqual(true, false, `Nothing should have stayed here, got ${index} ${target}`)
}
})
await rimraf(dir)
})
})

View File

@@ -1,88 +0,0 @@
import { join } from 'node:path'
import { createHash } from 'node:crypto'
import fs from 'node:fs/promises'
import { dirname } from 'path'
const MAX_INDEX_FILE_SIZE = 1024 * 1024
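// index files only contain the path of the protected file; listOlderTargets() refuses to read anything larger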
function sha256(content) {
return createHash('sha256').update(content).digest('hex')
}
function formatDate(date) {
return date.toISOString().split('T')[0]
}
async function computeIndexFilePath(path, immutabilityIndexPath) {
const stat = await fs.stat(path)
const date = new Date(stat.birthtimeMs)
const day = formatDate(date)
const hash = sha256(path)
return join(immutabilityIndexPath, day, hash)
}
export async function indexFile(path, immutabilityIndexPath) {
const indexFilePath = await computeIndexFilePath(path, immutabilityIndexPath)
try {
await fs.writeFile(indexFilePath, path, { flag: 'wx' })
} catch (err) {
// missing dir: make it
if (err.code === 'ENOENT') {
await fs.mkdir(dirname(indexFilePath), { recursive: true })
await fs.writeFile(indexFilePath, path)
} else {
throw err
}
}
return indexFilePath
}
export async function unindexFile(path, immutabilityIndexPath) {
try {
const cacheFileName = await computeIndexFilePath(path, immutabilityIndexPath)
await fs.unlink(cacheFileName)
} catch (err) {
if (err.code !== 'ENOENT') {
throw err
}
}
}
export async function* listOlderTargets(immutabilityCachePath, immutabilityDuration) {
// walk the per-day directories until the limit day
const limitDate = new Date(Date.now() - immutabilityDuration)
const limitDay = formatDate(limitDate)
const dir = await fs.opendir(immutabilityCachePath)
for await (const dirent of dir) {
if (dirent.isFile()) {
continue
}
// ensure we have a valid date
if (isNaN(new Date(dirent.name))) {
continue
}
// recent enough to be kept
if (dirent.name >= limitDay) {
continue
}
const subDirPath = join(immutabilityCachePath, dirent.name)
const subdir = await fs.opendir(subDirPath)
let nb = 0
for await (const hashFileEntry of subdir) {
const entryFullPath = join(subDirPath, hashFileEntry.name)
const { size } = await fs.stat(entryFullPath)
if (size > MAX_INDEX_FILE_SIZE) {
throw new Error(`Index file at ${entryFullPath} is too big, ${size} bytes `)
}
const targetPath = await fs.readFile(entryFullPath, { encoding: 'utf8' })
yield {
index: entryFullPath,
target: targetPath,
}
nb++
}
// clean up the empty day folder
if (nb === 0) {
await fs.rmdir(subDirPath)
}
}
}

View File

@@ -1 +0,0 @@
export default path => path.match(/xo-vm-backups\/[^/]+\/[^/]+\.json$/)

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env node
import fs from 'node:fs/promises'
import * as Directory from './directory.mjs'
import { createLogger } from '@xen-orchestra/log'
import { listOlderTargets } from './fileIndex.mjs'
import cleanXoCache from './_cleanXoCache.mjs'
import loadConfig from './_loadConfig.mjs'
const { info, warn } = createLogger('xen-orchestra:immutable-backups:liftProtection')
async function liftRemoteImmutability(immutabilityCachePath, immutabilityDuration) {
for await (const { index, target } of listOlderTargets(immutabilityCachePath, immutabilityDuration)) {
await Directory.liftImmutability(target, immutabilityCachePath)
await fs.unlink(index)
await cleanXoCache(target)
}
}
async function liftImmutability(remotes) {
for (const [remoteId, { indexPath, immutabilityDuration }] of Object.entries(remotes)) {
liftRemoteImmutability(indexPath, immutabilityDuration).catch(err =>
warn('error during watchRemote', { err, remoteId, indexPath, immutabilityDuration })
)
}
}
const { liftEvery, remotes } = await loadConfig()
if (liftEvery > 0) {
info('setup watcher for immutability lifting')
setInterval(async () => {
liftImmutability(remotes)
}, liftEvery)
} else {
liftImmutability(remotes)
}

Some files were not shown because too many files have changed in this diff.