Compare commits


1 Commit

Author SHA1 Message Date
Florent Beauchamp f618fcdaf8 feat(vhd): implement encryption on vhd directory 2022-06-18 11:12:30 +02:00
529 changed files with 10809 additions and 30260 deletions


@@ -28,10 +28,8 @@ module.exports = {
},
},
{
files: ['*.{spec,test}.{,c,m}js'],
files: ['*.spec.{,c,m}js'],
rules: {
'n/no-unpublished-require': 'off',
'n/no-unpublished-import': 'off',
'n/no-unsupported-features/node-builtins': [
'error',
{


@@ -6,10 +6,7 @@ labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
1. ⚠️ **If you don't follow this template, the issue will be closed**.
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
Are you using XOA or XO from the sources?
**XOA or XO from the sources?**
If XOA:
@@ -18,7 +15,6 @@ If XOA:
If XO from the sources:
- Provide **your commit number**. If it's older than a week, we won't investigate
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
@@ -42,6 +38,8 @@ If applicable, add screenshots to help explain your problem.
**Environment (please provide the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**


@@ -4,6 +4,7 @@ about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**


@@ -1,12 +1,13 @@
name: CI
on: push
on: [push]
jobs:
build:
name: Test
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Build docker image
run: docker-compose -f docker/docker-compose.dev.yml build
- name: Create the container and start the tests
run: docker-compose -f docker/docker-compose.dev.yml up --exit-code-from xo
- uses: actions/checkout@v3
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- run: docker-compose -f docker/docker-compose.dev.yml build
- run: docker-compose -f docker/docker-compose.dev.yml up

.gitignore vendored

@@ -10,6 +10,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat


@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx lint-staged


@@ -14,7 +14,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
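To see how these options fit together, here is a minimal usage sketch (assumptions: the `asyncEach` export documented above, the post-change default of `concurrency: 1`, and a purely illustrative item list and iteratee):

```js
'use strict'
const { asyncEach } = require('@vates/async-each')

async function main() {
  const ac = new AbortController()

  await asyncEach(
    ['a', 'b', 'c'],
    async item => {
      // at most `concurrency` calls to this iteratee run in parallel
      console.log('processing', item)
    },
    {
      concurrency: 2, // maximum number of parallel calls to the iteratee
      signal: ac.signal, // abort the whole iteration from the outside
      stopOnError: false, // collect every error into an AggregateError
    }
  )
}

main().catch(console.error)
```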


@@ -32,7 +32,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`


@@ -9,16 +9,7 @@ class AggregateError extends Error {
}
}
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []


@@ -1,8 +1,6 @@
'use strict'
const { describe, it, beforeEach } = require('test')
const assert = require('assert').strict
const { spy } = require('sinon')
/* eslint-env jest */
const { asyncEach } = require('./')
@@ -36,18 +34,12 @@ describe('asyncEach', () => {
})
it('works', async () => {
const iteratee = spy(async () => {})
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
await asyncEach.call(thisArg, iterable, iteratee)
assert.deepStrictEqual(
iteratee.thisValues,
Array.from(values, () => thisArg)
)
assert.deepStrictEqual(
iteratee.args,
Array.from(values, (value, index) => [value, index, iterable])
)
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
;[1, 2, 4].forEach(concurrency => {
it('respects a concurrency of ' + concurrency, async () => {
@@ -57,7 +49,7 @@ describe('asyncEach', () => {
values,
async () => {
++running
assert.deepStrictEqual(running <= concurrency, true)
expect(running).toBeLessThanOrEqual(concurrency)
await randomDelay()
--running
},
@@ -67,52 +59,40 @@ describe('asyncEach', () => {
})
it('stops on first error when stopOnError is true', async () => {
const tracker = new assert.CallTracker()
const error = new Error()
const iteratee = tracker.calls((_, i) => {
const iteratee = jest.fn((_, i) => {
if (i === 1) {
throw error
}
}, 2)
assert.deepStrictEqual(
await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true })),
error
)
})
tracker.verify()
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
it('rejects AggregateError when stopOnError is false', async () => {
const errors = []
const iteratee = spy(() => {
const iteratee = jest.fn(() => {
const error = new Error()
errors.push(error)
throw error
})
const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
assert.deepStrictEqual(error.errors, errors)
assert.deepStrictEqual(
iteratee.args,
Array.from(values, (value, index) => [value, index, iterable])
)
expect(error.errors).toEqual(errors)
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
it('can be interrupted with an AbortSignal', async () => {
const tracker = new assert.CallTracker()
const ac = new AbortController()
const iteratee = tracker.calls((_, i) => {
const iteratee = jest.fn((_, i) => {
if (i === 1) {
ac.abort()
}
}, 2)
await assert.rejects(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal }), {
message: 'asyncEach aborted',
})
tracker.verify()
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
expect(iteratee).toHaveBeenCalledTimes(2)
})
})
)


@@ -24,17 +24,11 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"sinon": "^14.0.1",
"tap": "^16.3.0",
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}


@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('assert')
/* eslint-env jest */
const { coalesceCalls } = require('./')
@@ -24,13 +23,13 @@ describe('coalesceCalls', () => {
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
assert.strictEqual(await promise1, 'foo')
assert.strictEqual(await promise2, 'foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
assert.strictEqual(await promise3, 'bar')
expect(await promise3).toBe('bar')
})
})


@@ -30,10 +30,6 @@
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}


@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert').strict
/* eslint-env jest */
const { compose } = require('./')
@@ -10,42 +9,43 @@ const mul3 = x => x * 3
describe('compose()', () => {
it('throws when no functions is passed', () => {
assert.throws(() => compose(), TypeError)
assert.throws(() => compose([]), TypeError)
expect(() => compose()).toThrow(TypeError)
expect(() => compose([])).toThrow(TypeError)
})
it('applies from left to right', () => {
assert.strictEqual(compose(add2, mul3)(5), 21)
expect(compose(add2, mul3)(5)).toBe(21)
})
it('accepts functions in an array', () => {
assert.strictEqual(compose([add2, mul3])(5), 21)
expect(compose([add2, mul3])(5)).toBe(21)
})
it('can apply from right to left', () => {
assert.strictEqual(compose({ right: true }, add2, mul3)(5), 17)
expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
})
it('accepts options with functions in an array', () => {
assert.strictEqual(compose({ right: true }, [add2, mul3])(5), 17)
expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
})
it('can compose async functions', async () => {
assert.strictEqual(
expect(
await compose(
{ async: true },
async x => x + 2,
async x => x * 3
)(5),
21
)
)(5)
).toBe(21)
})
it('forwards all args to first function', () => {
expect.assertions(1)
const expectedArgs = [Math.random(), Math.random()]
compose(
(...args) => {
assert.deepEqual(args, expectedArgs)
expect(args).toEqual(expectedArgs)
},
// add a second function to avoid the one function special case
Function.prototype
@@ -53,13 +53,15 @@ describe('compose()', () => {
})
it('forwards context to all functions', () => {
expect.assertions(2)
const expectedThis = {}
compose(
function () {
assert.strictEqual(this, expectedThis)
expect(this).toBe(expectedThis)
},
function () {
assert.strictEqual(this, expectedThis)
expect(this).toBe(expectedThis)
}
).call(expectedThis)
})


@@ -19,10 +19,6 @@
"node": ">=7.6"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}


@@ -1,7 +1,7 @@
'use strict'
const assert = require('assert')
const { describe, it } = require('test')
const { describe, it } = require('tap').mocha
const { decorateClass, decorateWith, decorateMethodsWith, perInstance } = require('./')


@@ -26,9 +26,9 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"test": "tap"
},
"devDependencies": {
"test": "^3.2.1"
"tap": "^16.0.1"
}
}


@@ -1,17 +1,16 @@
'use strict'
const { describe, it } = require('test')
const { useFakeTimers, spy, assert } = require('sinon')
/* eslint-env jest */
const { createDebounceResource } = require('./debounceResource')
const clock = useFakeTimers()
jest.useFakeTimers()
describe('debounceResource()', () => {
it('calls the resource disposer after 10 seconds', async () => {
const debounceResource = createDebounceResource()
const delay = 10e3
const dispose = spy()
const dispose = jest.fn()
const resource = await debounceResource(
Promise.resolve({
@@ -23,10 +22,10 @@ describe('debounceResource()', () => {
resource.dispose()
assert.notCalled(dispose)
expect(dispose).not.toBeCalled()
clock.tick(delay)
jest.advanceTimersByTime(delay)
assert.called(dispose)
expect(dispose).toBeCalled()
})
})


@@ -1,14 +1,13 @@
'use strict'
const { describe, it } = require('test')
const { spy, assert } = require('sinon')
/* eslint-env jest */
const { deduped } = require('./deduped')
describe('deduped()', () => {
it('calls the resource function only once', async () => {
const value = {}
const getResource = spy(async () => ({
const getResource = jest.fn(async () => ({
value,
dispose: Function.prototype,
}))
@@ -18,13 +17,13 @@ describe('deduped()', () => {
const { value: v1 } = await dedupedGetResource()
const { value: v2 } = await dedupedGetResource()
assert.calledOnce(getResource)
assert.match(v1, value)
assert.match(v2, value)
expect(getResource).toHaveBeenCalledTimes(1)
expect(v1).toBe(value)
expect(v2).toBe(value)
})
it('only disposes the source disposable when its all copies dispose', async () => {
const dispose = spy()
const dispose = jest.fn()
const getResource = async () => ({
value: '',
dispose,
@@ -37,35 +36,35 @@ describe('deduped()', () => {
d1()
assert.notCalled(dispose)
expect(dispose).not.toHaveBeenCalled()
d2()
assert.calledOnce(dispose)
expect(dispose).toHaveBeenCalledTimes(1)
})
it('works with sync factory', () => {
const value = {}
const dispose = spy()
const dispose = jest.fn()
const dedupedGetResource = deduped(() => ({ value, dispose }))
const d1 = dedupedGetResource()
assert.match(d1.value, value)
expect(d1.value).toBe(value)
const d2 = dedupedGetResource()
assert.match(d2.value, value)
expect(d2.value).toBe(value)
d1.dispose()
assert.notCalled(dispose)
expect(dispose).not.toHaveBeenCalled()
d2.dispose()
assert.calledOnce(dispose)
expect(dispose).toHaveBeenCalledTimes(1)
})
it('no race condition on dispose before async acquisition', async () => {
const dispose = spy()
const dispose = jest.fn()
const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))
const d1 = await dedupedGetResource()
@@ -74,6 +73,6 @@ describe('deduped()', () => {
d1.dispose()
assert.notCalled(dispose)
expect(dispose).not.toHaveBeenCalled()
})
})


@@ -14,22 +14,17 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.2",
"version": "0.1.1",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.4.0",
"@xen-orchestra/log": "^0.3.0",
"ensure-array": "^1.0.0"
},
"devDependencies": {
"sinon": "^14.0.1",
"test": "^3.2.1"
}
}


@@ -35,7 +35,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.1",
"version": "1.0.0",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --branches=72"


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,66 +0,0 @@
'use strict'
const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
// build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
mtime: st.mtime || new Date(),
atime: st.atime || new Date(),
ctime: st.ctime || new Date(),
size: st.size !== undefined ? st.size : 0,
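// POSIX mode values: 16877 = 0o40755 (drwxr-xr-x directory), 33188 = 0o100644 (-rw-r--r-- file), 41453 = 0o120755 (symlink)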
mode: st.mode === 'dir' ? 16877 : st.mode === 'file' ? 33188 : st.mode === 'link' ? 41453 : st.mode,
uid: st.uid !== undefined ? st.uid : process.getuid(),
gid: st.gid !== undefined ? st.gid : process.getgid(),
})
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({
max: 16, // each cached block is 2MB in size
})
await vhd.readBlockAllocationTable()
const fuse = new Fuse(mountDir, {
async readdir(path, cb) {
if (path === '/') {
return cb(null, ['vhd0'])
}
cb(Fuse.ENOENT)
},
async getattr(path, cb) {
if (path === '/') {
return cb(
null,
stat({
mode: 'dir',
size: 4096,
})
)
}
if (path === '/vhd0') {
return cb(
null,
stat({
mode: 'file',
size: vhd.footer.currentSize,
})
)
}
cb(Fuse.ENOENT)
},
read(path, fd, buf, len, pos, cb) {
if (path === '/vhd0') {
return vhd.readRawData(pos, len, cache, buf).then(cb)
}
throw new Error(`read: file ${path} does not exist`)
},
})
return new Disposable(
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})


@@ -1,29 +0,0 @@
{
"name": "@vates/fuse-vhd",
"version": "1.0.0",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/fuse-vhd",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
},
"dependencies": {
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.1.1"
},
"scripts": {
"postversion": "npm publish --access public"
}
}


@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert')
/* eslint-env jest */
const { MultiKeyMap } = require('./')
@@ -29,9 +28,9 @@ describe('MultiKeyMap', () => {
keys.forEach((key, i) => {
// copy the key to make sure the array itself is not the key
assert.strictEqual(map.get(key.slice()), values[i])
expect(map.get(key.slice())).toBe(values[i])
map.delete(key.slice())
assert.strictEqual(map.get(key.slice()), undefined)
expect(map.get(key.slice())).toBe(undefined)
})
})
})


@@ -23,10 +23,6 @@
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
"postversion": "npm publish --access public"
}
}


@@ -1,16 +0,0 @@
### `new NbdClient({address, exportname, secure = true, port = 10809})`
Create a new NBD client.
```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
address: 'MY_NBD_HOST',
exportname: 'MY_SECRET_EXPORT',
cert: 'Server certificate', // optional, will use encrypted link if provided
})
await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,47 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/nbd-client
[![Package Version](https://badgen.net/npm/v/@vates/nbd-client)](https://npmjs.org/package/@vates/nbd-client) ![License](https://badgen.net/npm/license/@vates/nbd-client) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/nbd-client)](https://bundlephobia.com/result?p=@vates/nbd-client) [![Node compatibility](https://badgen.net/npm/node/@vates/nbd-client)](https://npmjs.org/package/@vates/nbd-client)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/nbd-client):
```
> npm install --save @vates/nbd-client
```
## Usage
### `new NbdClient({address, exportname, secure = true, port = 10809})`
Create a new NBD client.
```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
address: 'MY_NBD_HOST',
exportname: 'MY_SECRET_EXPORT',
cert: 'Server certificate', // optional, will use encrypted link if provided
})
await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,42 +0,0 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensures we're connected to an NBD server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7
exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5
exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8
exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1
exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024


@@ -1,243 +0,0 @@
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
INIT_PASSWD,
NBD_CMD_READ,
NBD_DEFAULT_BLOCK_SIZE,
NBD_DEFAULT_PORT,
NBD_FLAG_FIXED_NEWSTYLE,
NBD_FLAG_HAS_FLAGS,
NBD_OPT_EXPORT_NAME,
NBD_OPT_REPLY_MAGIC,
NBD_OPT_STARTTLS,
NBD_REPLY_ACK,
NBD_REPLY_MAGIC,
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
} = require('./constants.js')
const { fromCallback } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
module.exports = class NbdClient {
#serverAddress
#serverCert
#serverPort
#serverSocket
#exportName
#exportSize
// AFAIK, there is no guarantee that the server answers in the same order as the queries,
// so we keep a backlog of commands waiting for a response and handle concurrency manually
#waitingForResponse // whether a listener is already waiting for a response
#nextCommandQueryId = BigInt(0)
#commandQueryBacklog // map of commands waiting for a response: queryId => { size /* in bytes */, resolve, reject }
constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
this.#serverAddress = address
this.#serverPort = port
this.#exportName = exportname
this.#serverCert = cert
}
get exportSize() {
return this.#exportSize
}
async #tlsConnect() {
return new Promise((resolve, reject) => {
this.#serverSocket = connect({
socket: this.#serverSocket,
rejectUnauthorized: false,
cert: this.#serverCert,
})
this.#serverSocket.once('error', reject)
this.#serverSocket.once('secureConnect', () => {
this.#serverSocket.removeListener('error', reject)
resolve()
})
})
}
// mandatory, at least to start the handshake
async #unsecureConnect() {
this.#serverSocket = new Socket()
return new Promise((resolve, reject) => {
this.#serverSocket.connect(this.#serverPort, this.#serverAddress)
this.#serverSocket.once('error', reject)
this.#serverSocket.once('connect', () => {
this.#serverSocket.removeListener('error', reject)
resolve()
})
})
}
async connect() {
// first we connect to the server without TLS, and then we upgrade the connection
// to TLS during the handshake
await this.#unsecureConnect()
await this.#handshake()
// reset internal state when reconnecting an NBD client
this.#commandQueryBacklog = new Map()
this.#waitingForResponse = false
}
async disconnect() {
await this.#serverSocket.destroy()
}
// we can use individual read/write from the socket here since there is no concurrency
async #sendOption(option, buffer = Buffer.alloc(0)) {
await this.#write(OPTS_MAGIC)
await this.#writeInt32(option)
await this.#writeInt32(buffer.length)
await this.#write(buffer)
assert.strictEqual(await this.#readInt64(), NBD_OPT_REPLY_MAGIC) // magic number everywhere
assert.strictEqual(await this.#readInt32(), option) // the option passed
assert.strictEqual(await this.#readInt32(), NBD_REPLY_ACK) // ACK
const length = await this.#readInt32()
assert.strictEqual(length, 0) // length
}
// we can use individual read/write from the socket here since there is only one handshake at once, no concurrency
async #handshake() {
assert((await this.#read(8)).equals(INIT_PASSWD))
assert((await this.#read(8)).equals(OPTS_MAGIC))
const flagsBuffer = await this.#read(2)
const flags = flagsBuffer.readInt16BE(0)
assert.strictEqual(flags & NBD_FLAG_FIXED_NEWSTYLE, NBD_FLAG_FIXED_NEWSTYLE) // only FIXED_NEWSTYLE is supported among the server options
await this.#writeInt32(NBD_FLAG_FIXED_NEWSTYLE) // the client also supports NBD_FLAG_C_FIXED_NEWSTYLE
if (this.#serverCert !== undefined) {
// upgrade socket to TLS if needed
await this.#sendOption(NBD_OPT_STARTTLS)
await this.#tlsConnect()
}
// send the export name we want to access.
// this implicitly closes the negotiation phase.
await this.#write(OPTS_MAGIC)
await this.#writeInt32(NBD_OPT_EXPORT_NAME)
const exportNameBuffer = Buffer.from(this.#exportName)
await this.#writeInt32(exportNameBuffer.length)
await this.#write(exportNameBuffer)
// 8 (export size) + 2 (flags) + 124 zeroes = 134
// must read all to ensure nothing stays in the buffer
const answer = await this.#read(134)
this.#exportSize = answer.readBigUInt64BE(0)
const transmissionFlags = answer.readInt16BE(8)
assert.strictEqual(transmissionFlags & NBD_FLAG_HAS_FLAGS, NBD_FLAG_HAS_FLAGS, 'NBD_FLAG_HAS_FLAGS') // must always be 1 per the spec
// note: the XAPI server always sends NBD_FLAG_READ_ONLY (3) as a flag
}
#read(length) {
return readChunkStrict(this.#serverSocket, length)
}
#write(buffer) {
return fromCallback.call(this.#serverSocket, 'write', buffer)
}
async #readInt32() {
const buffer = await this.#read(4)
return buffer.readInt32BE(0)
}
async #readInt64() {
const buffer = await this.#read(8)
return buffer.readBigUInt64BE(0)
}
#writeInt32(int) {
const buffer = Buffer.alloc(4)
buffer.writeInt32BE(int)
return this.#write(buffer)
}
// when one read fails, stop everything
async #rejectAll(error) {
this.#commandQueryBacklog.forEach(({ reject }) => {
reject(error)
})
await this.disconnect()
}
async #readBlockResponse() {
// ensure at most one read occurs in parallel
if (this.#waitingForResponse) {
return
}
try {
this.#waitingForResponse = true
const magic = await this.#readInt32()
if (magic !== NBD_REPLY_MAGIC) {
throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
}
const error = await this.#readInt32()
if (error !== 0) {
// @todo use error code from constants.mjs
throw new Error(`GOT ERROR CODE : ${error}`)
}
const blockQueryId = await this.#readInt64()
const query = this.#commandQueryBacklog.get(blockQueryId)
if (!query) {
throw new Error(` no query associated with id ${blockQueryId}`)
}
this.#commandQueryBacklog.delete(blockQueryId)
const data = await this.#read(query.size)
query.resolve(data)
this.#waitingForResponse = false
if (this.#commandQueryBacklog.size > 0) {
await this.#readBlockResponse()
}
} catch (error) {
// reject all the promises
// we don't need to call readBlockResponse on failure
// since we will empty the backlog
await this.#rejectAll(error)
}
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
const queryId = this.#nextCommandQueryId
this.#nextCommandQueryId++
// create and send the command at once to avoid concurrency issues
const buffer = Buffer.alloc(28)
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
buffer.writeInt16BE(0, 4) // no command flags for a simple block read
buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
buffer.writeBigUInt64BE(queryId, 8)
// byte offset in the raw disk
buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
buffer.writeInt32BE(size, 24)
return new Promise((resolve, reject) => {
// this will handle one block response, but it may be for another block
// since the server does not guarantee to answer queries in order
this.#commandQueryBacklog.set(queryId, {
size,
resolve,
reject,
})
// really send the command to the server
this.#write(buffer).catch(reject)
// #readBlockResponse never throws directly
// but if it fails it will reject all the promises in the backlog
this.#readBlockResponse()
})
}
}


@@ -1,76 +0,0 @@
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const FILE_SIZE = 2 * 1024 * 1024
async function createTempFile(size) {
const tmpPath = await pFromCallback(cb => tmp.file(cb))
const data = Buffer.alloc(size, 0)
for (let i = 0; i < size; i += 4) {
data.writeUInt32BE(i, i)
}
await fs.writeFile(tmpPath, data)
return tmpPath
}
test('it works with unsecured network', async tap => {
const path = await createTempFile(FILE_SIZE)
const nbdServer = spawn(
'nbdkit',
[
'file',
path,
'--newstyle', //
'--exit-with-parent',
'--read-only',
'--export-name=MY_SECRET_EXPORT',
],
{
stdio: ['inherit', 'inherit', 'inherit'],
}
)
const client = new NbdClient({
address: 'localhost',
exportname: 'MY_SECRET_EXPORT',
secure: false,
})
await client.connect()
tap.equal(client.exportSize, BigInt(FILE_SIZE))
const CHUNK_SIZE = 128 * 1024 // non default size
const indexes = []
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
indexes.push(i)
}
// read multiple blocks in parallel
await asyncEach(
indexes,
async i => {
const block = await client.readBlock(i, CHUNK_SIZE)
let blockOk = true
let firstFail
for (let j = 0; j < CHUNK_SIZE; j += 4) {
const wanted = i * CHUNK_SIZE + j
const found = block.readUInt32BE(j)
blockOk = blockOk && found === wanted
if (!blockOk && firstFail === undefined) {
firstFail = j
}
}
tap.ok(blockOk, `check block ${i} content`)
},
{ concurrency: 8 }
)
await client.disconnect()
nbdServer.kill()
await fs.unlink(path)
})


@@ -1,35 +0,0 @@
{
"private": false,
"name": "@vates/nbd-client",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/nbd-client",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/nbd-client",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=14.0"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"promise-toolbox": "^0.21.0",
"xen-api": "^1.2.2"
},
"devDependencies": {
"tap": "^16.3.0",
"tmp": "^0.2.1"
},
"scripts": {
"postversion": "npm publish --access public",
"test-integration": "tap *.spec.js"
}
}


@@ -1,130 +0,0 @@
### Usual workflow
> This section presents how this library should be used to implement a classic two-factor authentication.
#### Setup
```js
import { generateSecret, generateTotp } from '@vates/otp'
import QrCode from 'qrcode'
// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()
// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)
// Generates a URI to present to the user
const uri = generateTotpUri({ secret })
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```
#### Authentication
```js
import { verifyTotp } from '@vates/otp'
// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
console.log('authenticated!')
}
```
### API
#### Secret
```js
import { generateSecret } from '@vates/otp'
const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
#### HOTP
> This is likely not what you want to use, see TOTP below instead.
```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
// a sequence number, see HOTP specification
const counter = 0
// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'
// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid changing it, as non-default values are not compatible with Google Authenticator
#### TOTP
```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'
// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'
// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid changing it, as non-default values are not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, at which this token will be valid, defaults to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid
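To make the `window` option concrete, here is a hedged sketch (timestamps are illustrative; it only assumes the `generateTotp`/`verifyTotp` exports documented above):

```js
import { generateTotp, verifyTotp } from '@vates/otp'

const secret = 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH' // example secret from above
const now = Math.floor(Date.now() / 1e3)

// a token generated one period (30 s) in the past...
const previousToken = await generateTotp({ secret, timestamp: now - 30 })

// ...is still accepted with the default window of 1 period around `timestamp`
await verifyTotp(previousToken, { secret, timestamp: now }) // → true

// but is rejected when the window is narrowed to the current period only
await verifyTotp(previousToken, { secret, timestamp: now, window: 0 }) // → false
```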
#### Verification from URI
```js
import { verifyFromUri } from '@vates/otp'
// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```


@@ -1 +0,0 @@
../../scripts/npmignore


@@ -1,163 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/otp
[![Package Version](https://badgen.net/npm/v/@vates/otp)](https://npmjs.org/package/@vates/otp) ![License](https://badgen.net/npm/license/@vates/otp) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/otp)](https://bundlephobia.com/result?p=@vates/otp) [![Node compatibility](https://badgen.net/npm/node/@vates/otp)](https://npmjs.org/package/@vates/otp)
> Minimal HOTP/TOTP implementation
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/otp):
```
> npm install --save @vates/otp
```
## Usage
### Usual workflow
> This section presents how this library should be used to implement a classic two-factor authentication.
#### Setup
```js
import { generateSecret, generateTotp } from '@vates/otp'
import QrCode from 'qrcode'
// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()
// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)
// Generates a URI to present to the user
const uri = generateTotpUri({ secret })
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```
#### Authentication
```js
import { verifyTotp } from '@vates/otp'
// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
console.log('authenticated!')
}
```
### API
#### Secret
```js
import { generateSecret } from '@vates/otp'
const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
#### HOTP
> This is likely not what you want to use, see TOTP below instead.
```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
// a sequence number, see HOTP specification
const counter = 0
// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'
// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid changing it, as non-default values are not compatible with Google Authenticator
#### TOTP
```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'
// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'
// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token; avoid changing it, as non-default values are not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, at which this token will be valid, defaults to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid
#### Verification from URI
```js
import { verifyFromUri } from '@vates/otp'
// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)


@@ -1,111 +0,0 @@
import { base32 } from 'rfc4648'
import { webcrypto } from 'node:crypto'
const { subtle } = webcrypto
function assert(name, value) {
if (!value) {
throw new TypeError('invalid value for param ' + name)
}
}
// https://github.com/google/google-authenticator/wiki/Key-Uri-Format
function generateUri(protocol, label, params) {
assert('label', typeof label === 'string')
assert('secret', typeof params.secret === 'string')
let path = encodeURIComponent(label)
const { issuer } = params
if (issuer !== undefined) {
path = encodeURIComponent(issuer) + ':' + path
}
const query = Object.entries(params)
.filter(_ => _[1] !== undefined)
.map(([key, value]) => key + '=' + encodeURIComponent(value))
.join('&')
return `otpauth://${protocol}/${path}?${query}`
}
export function generateSecret() {
// https://www.rfc-editor.org/rfc/rfc4226 recommends 160 bits (i.e. 20 bytes)
const data = new Uint8Array(20)
webcrypto.getRandomValues(data)
return base32.stringify(data, { pad: false })
}
const DIGITS = 6
// https://www.rfc-editor.org/rfc/rfc4226
export async function generateHotp({ counter, digits = DIGITS, secret }) {
const data = new Uint8Array(8)
new DataView(data.buffer).setBigInt64(0, BigInt(counter), false)
const key = await subtle.importKey(
'raw',
base32.parse(secret, { loose: true }),
{ name: 'HMAC', hash: 'SHA-1' },
false,
['sign', 'verify']
)
const digest = new DataView(await subtle.sign('HMAC', key, data))
const offset = digest.getUint8(digest.byteLength - 1) & 0xf
const p = digest.getUint32(offset) & 0x7f_ff_ff_ff
return String(p % Math.pow(10, digits)).padStart(digits, '0')
}
export function generateHotpUri({ counter, digits, issuer, label, secret }) {
assert('counter', typeof counter === 'number')
return generateUri('hotp', label, { counter, digits, issuer, secret })
}
export async function verifyHotp(token, opts) {
return token === (await generateHotp(opts))
}
function totpCounter(period = 30, timestamp = Math.floor(Date.now() / 1e3)) {
return Math.floor(timestamp / period)
}
// https://www.rfc-editor.org/rfc/rfc6238.html
export async function generateTotp({ period, timestamp, ...opts }) {
opts.counter = totpCounter(period, timestamp)
return await generateHotp(opts)
}
export function generateTotpUri({ digits, issuer, label, period, secret }) {
return generateUri('totp', label, { digits, issuer, period, secret })
}
export async function verifyTotp(token, { period, timestamp, window = 1, ...opts }) {
const counter = totpCounter(period, timestamp)
const end = counter + window
opts.counter = counter - window
while (opts.counter <= end) {
if (token === (await generateHotp(opts))) {
return true
}
opts.counter += 1
}
return false
}
export async function verifyFromUri(token, uri) {
const url = new URL(uri)
assert('protocol', url.protocol === 'otpauth:')
const { host } = url
const opts = Object.fromEntries(url.searchParams.entries())
if (host === 'hotp') {
return await verifyHotp(token, opts)
}
if (host === 'totp') {
return await verifyTotp(token, opts)
}
assert('host', false)
}


@@ -1,112 +0,0 @@
import { strict as assert } from 'node:assert'
import { describe, it } from 'tap/mocha'
import {
generateHotp,
generateHotpUri,
generateSecret,
generateTotp,
generateTotpUri,
verifyHotp,
verifyTotp,
} from './index.mjs'
describe('generateSecret', function () {
it('generates a string of 32 chars', async function () {
const secret = generateSecret()
assert.equal(typeof secret, 'string')
assert.equal(secret.length, 32)
})
it('generates a different secret at each call', async function () {
assert.notEqual(generateSecret(), generateSecret())
})
})
describe('HOTP', function () {
it('generate and verify valid tokens', async function () {
for (const [token, opts] of Object.entries({
382752: {
counter: -3088,
secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
},
163376: {
counter: 30598,
secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
},
})) {
assert.equal(await generateHotp(opts), token)
assert(await verifyHotp(token, opts))
}
})
describe('generateHotpUri', function () {
const opts = {
counter: 59732,
label: 'the label',
secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
}
Object.entries({
'without optional params': [
opts,
'otpauth://hotp/the%20label?counter=59732&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
'with issuer': [
{ ...opts, issuer: 'the issuer' },
'otpauth://hotp/the%20issuer:the%20label?counter=59732&issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
'with digits': [
{ ...opts, digits: 7 },
'otpauth://hotp/the%20label?counter=59732&digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
}).forEach(([title, [opts, uri]]) => {
it(title, async function () {
assert.strictEqual(generateHotpUri(opts), uri)
})
})
})
})
describe('TOTP', function () {
Object.entries({
'033702': {
secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
timestamp: 1665416296,
period: 30,
},
107250: {
secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
timestamp: 1665416674,
period: 60,
},
}).forEach(([token, opts]) => {
it('works', async function () {
assert.equal(await generateTotp(opts), token)
assert(await verifyTotp(token, opts))
})
})
describe('generateHotpUri', function () {
const opts = {
label: 'the label',
secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
}
Object.entries({
'without optional params': [opts, 'otpauth://totp/the%20label?secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX'],
'with issuer': [
{ ...opts, issuer: 'the issuer' },
'otpauth://totp/the%20issuer:the%20label?issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
'with digits': [
{ ...opts, digits: 7 },
'otpauth://totp/the%20label?digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
],
}).forEach(([title, [opts, uri]]) => {
it(title, async function () {
assert.strictEqual(generateTotpUri(opts), uri)
})
})
})
})


@@ -1,39 +0,0 @@
{
"private": false,
"name": "@vates/otp",
"description": "Minimal HTOP/TOTP implementation",
"keywords": [
"2fa",
"authenticator",
"hotp",
"otp",
"totp"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/otp",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"main": "index.mjs",
"repository": {
"directory": "@vates/otp",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=15"
},
"dependencies": {
"rfc4648": "^1.5.2"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^16.3.0"
}
}


@@ -1,7 +1,7 @@
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`, this permits the most efficient composition:
```js
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
@@ -36,21 +36,6 @@ isBetween3And10(10)
// → false
```
### `not(predicate)`
> Returns a predicate that returns the negation of the predicate.
```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)
isOdd(1)
// true
isOdd(2)
// false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.


@@ -19,7 +19,7 @@ Installation of the [npm package](https://npmjs.org/package/@vates/predicates):
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`, this permits the most efficient composition:
```js
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
@@ -54,21 +54,6 @@ isBetween3And10(10)
// → false
```
### `not(predicate)`
> Returns a predicate that returns the negation of the predicate.
```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)
isOdd(1)
// true
isOdd(2)
// false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.


@@ -51,22 +51,6 @@ exports.every = function every() {
}
}
const notPredicateTag = {}
exports.not = function not(predicate) {
if (isDefinedPredicate(predicate)) {
if (predicate.tag === notPredicateTag) {
return predicate.predicate
}
function notPredicate() {
return !predicate.apply(this, arguments)
}
notPredicate.predicate = predicate
notPredicate.tag = notPredicateTag
return notPredicate
}
}
exports.some = function some() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length


@@ -3,14 +3,20 @@
const assert = require('assert/strict')
const { describe, it } = require('tap').mocha
const { every, not, some } = require('./')
const { every, some } = require('./')
const T = () => true
const F = () => false
const testArgHandling = fn => {
it('returns undefined if predicate is undefined', () => {
const testArgsHandling = fn => {
it('returns undefined if all predicates are undefined', () => {
assert.equal(fn(undefined), undefined)
assert.equal(fn([undefined]), undefined)
})
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('throws if it receives a non-predicate', () => {
@@ -18,15 +24,6 @@ const testArgHandling = fn => {
error.value = 3
assert.throws(() => fn(3), error)
})
}
const testArgsHandling = fn => {
testArgHandling(fn)
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('forwards this and arguments to predicates', () => {
const thisArg = 'qux'
@@ -39,21 +36,17 @@ const testArgsHandling = fn => {
})
}
const runTests = (fn, acceptMultiple, truthTable) =>
const runTests = (fn, truthTable) =>
it('works', () => {
truthTable.forEach(([result, ...predicates]) => {
if (acceptMultiple) {
assert.equal(fn(predicates)(), result)
} else {
assert.equal(predicates.length, 1)
}
assert.equal(fn(...predicates)(), result)
assert.equal(fn(predicates)(), result)
})
})
describe('every', () => {
testArgsHandling(every)
runTests(every, true, [
runTests(every, [
[true, T, T],
[false, T, F],
[false, F, T],
@@ -61,22 +54,9 @@ describe('every', () => {
])
})
describe('not', () => {
testArgHandling(not)
it('returns the original predicate if negated twice', () => {
assert.equal(not(not(T)), T)
})
runTests(not, false, [
[true, F],
[false, T],
])
})
describe('some', () => {
testArgsHandling(some)
runTests(some, true, [
runTests(some, [
[true, T, T],
[true, T, F],
[true, F, T],


@@ -26,7 +26,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.1.0",
"version": "1.0.0",
"engines": {
"node": ">=6"
},


@@ -1,9 +1,7 @@
'use strict'
const readChunk = (stream, size) =>
stream.closed || stream.readableEnded
? Promise.resolve(null)
: size === 0
size === 0
? Promise.resolve(Buffer.alloc(0))
: new Promise((resolve, reject) => {
function onEnd() {


@@ -1,7 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert').strict
/* eslint-env jest */
const { Readable } = require('stream')
@@ -12,42 +11,35 @@ makeStream.obj = Readable.from
describe('readChunk', () => {
it('returns null if stream is empty', async () => {
assert.strictEqual(await readChunk(makeStream([])), null)
})
it('returns null if the stream is already ended', async () => {
const stream = await makeStream([])
await readChunk(stream)
assert.strictEqual(await readChunk(stream), null)
expect(await readChunk(makeStream([]))).toBe(null)
})
describe('with binary stream', () => {
it('returns the first chunk of data', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar'])), Buffer.from('foo'))
expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
})
it('returns a chunk of the specified size (smaller than first)', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 2), Buffer.from('fo'))
expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
})
it('returns a chunk of the specified size (larger than first)', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 4), Buffer.from('foob'))
expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
})
it('returns less data if stream ends', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
})
it('returns an empty buffer if the specified size is 0', async () => {
assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
})
})
describe('with object stream', () => {
it('returns the first chunk of data verbatim', async () => {
const chunks = [{}, {}]
assert.strictEqual(await readChunk(makeStream.obj(chunks)), chunks[0])
expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
})
})
})
@@ -63,15 +55,15 @@ const rejectionOf = promise =>
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended without data')
assert.strictEqual(error.chunk, undefined)
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended without data')
expect(error.chunk).toEqual(undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
assert(error instanceof Error)
assert.strictEqual(error.message, 'stream has ended with not enough data')
assert.deepEqual(error.chunk, Buffer.from('foobar'))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended with not enough data')
expect(error.chunk).toEqual(Buffer.from('foobar'))
})
})


@@ -19,19 +19,15 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.0.1",
"version": "0.1.2",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"postversion": "npm publish --access public"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"devDependencies": {
"test": "^3.2.1"
}
}


@@ -30,7 +30,6 @@ if (args.length === 0) {
${name} v${version}
`)
// eslint-disable-next-line n/no-process-exit
process.exit()
}


@@ -1,8 +1,6 @@
'use strict'
const { describe, it } = require('test')
const assert = require('assert').strict
const sinon = require('sinon')
/* eslint-env jest */
const { asyncMapSettled } = require('./')
@@ -11,29 +9,26 @@ const noop = Function.prototype
describe('asyncMapSettled', () => {
it('works', async () => {
const values = [Math.random(), Math.random()]
const spy = sinon.spy(async v => v * 2)
const spy = jest.fn(async v => v * 2)
const iterable = new Set(values)
// returns an array containing the result of each call
assert.deepStrictEqual(
await asyncMapSettled(iterable, spy),
values.map(value => value * 2)
)
expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))
for (let i = 0, n = values.length; i < n; ++i) {
// each call receives the current item as its sole argument
assert.deepStrictEqual(spy.args[i], [values[i]])
expect(spy.mock.calls[i]).toEqual([values[i]])
// each call has `this` bound to the iterable
assert.deepStrictEqual(spy.thisValues[i], iterable)
expect(spy.mock.instances[i]).toBe(iterable)
}
})
it('can use a specified thisArg', () => {
const thisArg = {}
const spy = sinon.spy()
const spy = jest.fn()
asyncMapSettled(['foo'], spy, thisArg)
assert.deepStrictEqual(spy.thisValues[0], thisArg)
expect(spy.mock.instances[0]).toBe(thisArg)
})
it('rejects only when all calls are resolved', async () => {
@@ -60,22 +55,19 @@ describe('asyncMapSettled', () => {
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
assert.strictEqual(hasSettled, false)
expect(hasSettled).toBe(false)
defers[1].resolve()
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
assert.strictEqual(hasSettled, true)
await assert.rejects(promise, error)
expect(hasSettled).toBe(true)
await expect(promise).rejects.toBe(error)
})
it('issues when latest promise rejects', async () => {
const error = new Error()
await assert.rejects(
asyncMapSettled([1], () => Promise.reject(error)),
error
)
await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
})
})


@@ -31,11 +31,6 @@
"lodash": "^4.17.4"
},
"scripts": {
"postversion": "npm publish",
"test": "node--test"
},
"devDependencies": {
"sinon": "^14.0.1",
"test": "^3.2.1"
"postversion": "npm publish"
}
}


@@ -7,7 +7,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.2.1",
"version": "0.2.0",
"engines": {
"node": ">=14"
},
@@ -17,7 +17,7 @@
},
"dependencies": {
"@vates/decorate-with": "^2.0.0",
"@xen-orchestra/log": "^0.4.0",
"@xen-orchestra/log": "^0.3.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
},


@@ -5,6 +5,7 @@ const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
@@ -14,7 +15,7 @@ const configs = {
proposal: 'minimal',
},
'@babel/preset-env': {
debug: __PROD__,
debug: !__TEST__,
// disabled until https://github.com/babel/babel/issues/8323 is resolved
// loose: true,


@@ -1,10 +1,11 @@
import { readFileSync } from 'fs'
import getopts from 'getopts'
'use strict'
const { version } = JSON.parse(readFileSync(new URL('package.json', import.meta.url)))
const getopts = require('getopts')
export function composeCommands(commands) {
return async function (args, prefix) {
const { version } = require('./package.json')
module.exports = commands =>
async function (args, prefix) {
const opts = getopts(args, {
alias: {
help: 'h',
@@ -29,6 +30,5 @@ xo-backups v${version}
return
}
return (await command.default)(args.slice(1), prefix + ' ' + commandName)
return command.main(args.slice(1), prefix + ' ' + commandName)
}
}


@@ -1,9 +1,11 @@
import fs from 'fs/promises'
import { dirname } from 'path'
'use strict'
export * from 'fs/promises'
const { dirname } = require('path')
export const getSize = path =>
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs
fs.getSize = path =>
fs.stat(path).then(
_ => _.size,
error => {
@@ -14,7 +16,7 @@ export const getSize = path =>
}
)
export async function mktree(path) {
fs.mktree = async function mkdirp(path) {
try {
await fs.mkdir(path)
} catch (error) {
@@ -24,8 +26,8 @@ export async function mktree(path) {
return
}
if (code === 'ENOENT') {
await mktree(dirname(path))
return mktree(path)
await mkdirp(dirname(path))
return mkdirp(path)
}
throw error
}
@@ -35,7 +37,7 @@ export async function mktree(path) {
// - single param for direct use in `Array#map`
// - files are prefixed with directory path
// - safer: returns empty array if path is missing or not a directory
export const readdir2 = path =>
fs.readdir2 = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
@@ -57,7 +59,7 @@ export const readdir2 = path =>
}
)
export async function symlink2(target, path) {
fs.symlink2 = async (target, path) => {
try {
await fs.symlink(target, path)
} catch (error) {

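A usage sketch of these helpers (the directory path is illustrative, not from the diff):

```js
// minimal sketch using the ESM variants exported above
import { getSize, mktree, readdir2 } from './_fs.mjs'

const dir = '/tmp/xo-backups-example' // hypothetical path
await mktree(`${dir}/indexes/by-vm`) // recursively creates missing parent directories

// readdir2 prefixes each entry with the directory path and returns
// an empty array if the path is missing or not a directory
for (const entry of await readdir2(dir)) {
  console.log(entry, await getSize(entry))
}
```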
View File

@@ -0,0 +1,34 @@
'use strict'
// -----------------------------------------------------------------------------
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const getopts = require('getopts')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
const { resolve } = require('path')
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))
module.exports = async function main(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}
})
}
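A minimal sketch of invoking this command programmatically (the remote path is hypothetical):

```js
// equivalent to: xo-backups clean-vms --remove --merge /remote/xo-vm-backups/<VM UUID>
const cleanVms = require('./commands/clean-vms')

cleanVms(['--remove', '--merge', '/remote/xo-vm-backups/example-vm']).catch(console.error)
```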

View File

@@ -1,38 +0,0 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'
import Disposable from 'promise-toolbox/Disposable'
import { pathToFileURL } from 'url'
export default async function cleanVms(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
await asyncMap(_, vmDir =>
Disposable.use(getSyncedHandler({ url: pathToFileURL(dirname(vmDir)).href }), async handler => {
try {
await new RemoteAdapter(handler).cleanVm(basename(vmDir), {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}
})
)
}

View File

@@ -1,10 +1,13 @@
import { mktree, readdir2, readFile, symlink2 } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import filenamify from 'filenamify'
import get from 'lodash/get.js'
import { dirname, join, relative } from 'path'
'use strict'
export default async function createSymlinkIndex([backupDir, fieldPath]) {
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')
const { mktree, readdir2, readFile, symlink2 } = require('../_fs')
module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
const indexDir = join(backupDir, 'indexes', filenamify(fieldPath))
await mktree(indexDir)

View File

@@ -1,13 +1,16 @@
import { readdir2, readFile, getSize } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import { createHash } from 'crypto'
import groupBy from 'lodash/groupBy.js'
import { dirname, resolve } from 'path'
'use strict'
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const { readdir2, readFile, getSize } = require('../_fs')
const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)
export default async function info(vmDirs) {
module.exports = async function info(vmDirs) {
const jsonFiles = (
await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
).flat()

View File

@@ -1,12 +1,11 @@
#!/usr/bin/env node
import { composeCommands } from './_composeCommands.mjs'
const importDefault = async path => (await import(path)).default
'use strict'
composeCommands({
require('./_composeCommands')({
'clean-vms': {
get default() {
return importDefault('./commands/clean-vms.mjs')
get main() {
return require('./commands/clean-vms')
},
usage: `[--fix] [--merge] [--remove] xo-vm-backups/*
@@ -19,14 +18,14 @@ composeCommands({
`,
},
'create-symlink-index': {
get default() {
return importDefault('./commands/create-symlink-index.mjs')
get main() {
return require('./commands/create-symlink-index')
},
usage: 'xo-vm-backups <field path>',
},
info: {
get default() {
return importDefault('./commands/info.mjs')
get main() {
return require('./commands/info')
},
usage: 'xo-vm-backups/*',
},

View File

@@ -1,21 +1,21 @@
{
"private": false,
"bin": {
"xo-backups": "index.mjs"
"xo-backups": "index.js"
},
"preferGlobal": true,
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.29.0",
"@xen-orchestra/fs": "^3.2.0",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/fs": "^1.0.3",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox":"^0.21.0"
"promise-toolbox": "^0.21.0"
},
"engines": {
"node": ">=14"
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.8",
"version": "0.7.3",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -43,7 +43,6 @@ const DEFAULT_VM_SETTINGS = {
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
useNbd: false,
unconditionalSnapshot: false,
vmTimeout: 0,
}
@@ -246,7 +245,7 @@ exports.Backup = class Backup {
})
)
),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
() => settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined,
async (srs, remoteAdapters, healthCheckSr) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

View File

@@ -15,22 +15,18 @@ const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
// @todo: this import is marked extraneous, should be fixed when lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
@@ -38,7 +34,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const { warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -48,13 +44,16 @@ const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
files.push({
realPath,
metadataPath,
@@ -78,14 +77,14 @@ const debounceResourceFactory = factory =>
class RemoteAdapter {
constructor(
handler,
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, vhdDirectoryEncryption } = {}
) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._vhdDirectoryEncryption = vhdDirectoryEncryption
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
this._useGetDiskLegacy = useGetDiskLegacy
}
get handler() {
@@ -133,9 +132,7 @@ class RemoteAdapter {
}
async *_getPartition(devicePath, partition) {
// the norecovery option is necessary because if the partition is dirty,
// mount will try to fix it, which is impossible because the device is read-only
const options = ['loop', 'ro', 'norecovery']
const options = ['loop', 'ro']
if (partition !== undefined) {
const { size, start } = partition
@@ -208,7 +205,9 @@ class RemoteAdapter {
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
? this.#useVhdDirectory() &&
this.#getCompressionType() === vhd.compressionType &&
this.#getEncryption() === vhd.encryption
: !this.#useVhdDirectory()
})
}
@@ -232,30 +231,11 @@ class RemoteAdapter {
return promise
}
#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -283,8 +263,6 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -305,29 +283,33 @@ class RemoteAdapter {
full !== undefined && this.deleteFullVmBackups(full),
])
await asyncMap(new Set(files.map(file => dirname(file))), dir =>
// - don't merge in main process, unused VHDs will be merged in the next backup run
// - don't error in case this fails:
// - if lock is already being held, a backup is running and cleanVm will be ran at the end
// - otherwise, there is nothing more we can do, orphan file will be cleaned in the future
this.cleanVm(dir, { remove: true, logWarn: warn }).catch(noop)
)
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
}
#getCompressionType() {
return this._vhdDirectoryCompression
}
#getEncryption() {
return this._vhdDirectoryEncryption
}
#useVhdDirectory() {
return this.handler.useVhdDirectory()
return this.handler.type === 's3'
}
#useAlias() {
return this.#useVhdDirectory()
}
async *#getDiskLegacy(diskId) {
const RE_VHDI = /^vhdi(\d+)$/
async *getDisk(diskId) {
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
@@ -357,20 +339,6 @@ class RemoteAdapter {
}
}
async *getDisk(diskId) {
if (this._useGetDiskLegacy) {
yield* this.#getDiskLegacy(diskId)
return
}
const handler = this._handler
// this is a disposable
const mountDir = yield getTmpDir()
// this is also a disposable
yield mount(handler, diskId, mountDir)
// this will yield disk path to caller
yield `${mountDir}/vhd0`
}
// partitionId values:
//
// - undefined: raw disk
@@ -421,25 +389,18 @@ class RemoteAdapter {
listPartitionFiles(diskId, partitionId, path) {
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncEach(
await readdir(path),
async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
await asyncMap(await readdir(path), async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
},
{ concurrency: 1 }
)
}
})
return entriesMap
})
@@ -504,42 +465,11 @@ class RemoteAdapter {
return backupsByPool
}
#getVmBackupsCache(vmUuid) {
return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
async #readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
if (error.code !== 'ENOENT') {
warn('#readCache', { error, path })
}
}
}
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
const cache = await this.#readCache(path)
if (cache !== undefined) {
fn(cache)
await this.#writeCache(path, cache)
}
}
async #writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
warn('#writeCache', { error, path })
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
const handler = this._handler
const backups = {}
@@ -575,26 +505,41 @@ class RemoteAdapter {
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const path = this.#getVmBackupsCache(vmUuid)
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
const cache = await this.#readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
const backups = await this.#getCachabledDataListVmBackups(dir)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeCache(path, backups)
this.#writeVmBackupsCache(path, backups)
return backups
}
async #writeVmBackupsCache(cacheFile, backups) {
try {
const text = JSON.stringify(backups)
const zipped = await fromCallback(zlib.gzip, text)
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
} catch (error) {
warn('writeVmBackupsCache', { cacheFile, error })
}
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
@@ -633,40 +578,19 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVmBackupMetadata(vmUuid, metadata) {
const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
await this.handler.outputFile(path, JSON.stringify(metadata), {
dirMode: this._dirMode,
})
// will not throw
this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,
// these values are required in the cache
_filename: path,
id: path,
}
})
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
concurrency: 16,
compression: this.#getCompressionType(),
encryption: this.#getEncryption(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {

View File

@@ -3,10 +3,8 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
const logAfterEnd = () => {
throw new Error('task has already ended')
}
const noop = Function.prototype

View File

@@ -128,49 +128,42 @@ class VmBackup {
}
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, step, parallel = true) {
async _callWriters(fn, warnMessage, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only ones that are not already in their own sub tasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
try {
await fn(writer)
} catch (error) {
writers.delete(writer)
throw error
}
return
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await callWriter(writer)
await fn(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
// these two steps are the only ones that are not already in their own sub tasks
if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
Task.warning(
`the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
}
}

View File

@@ -71,6 +71,7 @@ class BackupWorker {
debounceResource: this.debounceResource,
dirMode: this.#config.dirMode,
vhdDirectoryCompression: this.#config.vhdDirectoryCompression,
vhdDirectoryEncryption: this.#config.vhdDirectoryEncryption,
})
} finally {
await handler.forget()

View File

@@ -1,7 +1,6 @@
'use strict'
const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict
/* eslint-env jest */
const rimraf = require('rimraf')
const tmp = require('tmp')
@@ -15,8 +14,9 @@ const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
let tempDir, adapter, handler, jobId, vdiId, basePath
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
@@ -25,8 +25,7 @@ beforeEach(async () => {
adapter = new RemoteAdapter(handler)
jobId = uniqueId()
vdiId = uniqueId()
relativePath = `vdis/${jobId}/${vdiId}`
basePath = `${rootPath}/${relativePath}`
basePath = `vdis/${jobId}/${vdiId}`
await fs.mkdirp(`${tempDir}/${basePath}`)
})
@@ -36,7 +35,7 @@ afterEach(async () => {
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
async function generateVhd(path, opts = {}) {
let vhd
@@ -77,18 +76,18 @@ test('It remove broken vhd', async () => {
// todo: also test a directory and an alias
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
assert.equal((await handler.list(basePath)).length, 1)
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message
}
await adapter.cleanVm(rootPath, { remove: false, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued, `VHD check error`)
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
// not removed
assert.deepEqual(await handler.list(basePath), ['notReallyAVhd.vhd'])
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
assert.deepEqual(await handler.list(basePath), [])
await adapter.cleanVm('/', { remove: true, onLog })
expect((await handler.list(basePath)).length).toEqual(0)
})
test('it remove vhd with missing or multiple ancestors', async () => {
@@ -119,13 +118,15 @@ test('it remove vhd with missing or multiple ancestors', async () => {
)
// clean
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm('/', { remove: true, onLog })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
assert.equal(deletedOrphanVhd.length, 1) // only one vhd should have been deleted
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
@@ -133,12 +134,12 @@ test('it remove vhd with missing or multiple ancestors', async () => {
test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
// create a metadata file marking child and orphan as ok
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${relativePath}/orphan.vhd`,
`${relativePath}/child.vhd`,
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here
],
})
@@ -158,42 +159,44 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
})
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
assert.equal(matched.length, 1) // only one vhd should have been deleted
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
// a missing vhd cause clean to remove all vhds
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`deleted.vhd`, // in metadata but not in vhds
`orphan.vhd`,
`child.vhd`,
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
assert.equal(matched.length, 2) // all vhds (orphan and child ) should have been deleted
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
test('it merges delta of non destroyed chain', async () => {
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 12000, // a size too small
vhds: [
`${relativePath}/grandchild.vhd`, // grand child should not be merged
`${relativePath}/child.vhd`,
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
// orphan is not here, he should be merged in child
],
})
@@ -217,36 +220,36 @@ test('it merges delta of non destroyed chain', async () => {
})
let loggued = []
const logInfo = message => {
const onLog = message => {
loggued.push(message)
}
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued[0], `incorrect backup size in metadata`)
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
loggued = []
await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [merging] = loggued
assert.equal(merging, `merging VHD chain`)
expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
assert.equal(metadata.size, 209920)
expect(metadata.size).toEqual(209920)
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
// only check deletion
const remainingVhds = await handler.list(basePath)
assert.equal(remainingVhds.length, 2)
assert.equal(remainingVhds.includes('child.vhd'), true)
assert.equal(remainingVhds.includes('grandchild.vhd'), true)
expect(remainingVhds.length).toEqual(2)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
})
test('it finish unterminated merge ', async () => {
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [`${relativePath}/orphan.vhd`, `${relativePath}/child.vhd`],
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
})
)
@@ -272,13 +275,13 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
// only check deletion
const remainingVhds = await handler.list(basePath)
assert.equal(remainingVhds.length, 1)
assert.equal(remainingVhds.includes('child.vhd'), true)
expect(remainingVhds.length).toEqual(1)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
})
// each of the vhd can be a file, a directory, an alias to a file or an alias to a directory
@@ -368,22 +371,22 @@ describe('tests multiple combination ', () => {
// the metadata file
await handler.writeFile(
`${rootPath}/metadata.json`,
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${relativePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${relativePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${relativePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
],
})
)
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
// broken, non-referenced and abandonned VHDs should be deleted (alias and data)
// ancestor and child should be merged
@@ -393,19 +396,19 @@ describe('tests multiple combination ', () => {
if (useAlias) {
const dataSurvivors = await handler.list(basePath + '/data')
// the goal of the alias : do not move a full folder
assert.equal(dataSurvivors.includes('ancestor.vhd'), true)
assert.equal(dataSurvivors.includes('grandchild.vhd'), true)
assert.equal(dataSurvivors.includes('cleanAncestor.vhd'), true)
assert.equal(survivors.includes('clean.vhd.alias.vhd'), true)
assert.equal(survivors.includes('child.vhd.alias.vhd'), true)
assert.equal(survivors.includes('grandchild.vhd.alias.vhd'), true)
assert.equal(survivors.length, 4) // the 3 ok + data
assert.equal(dataSurvivors.length, 3)
expect(dataSurvivors).toContain('ancestor.vhd')
expect(dataSurvivors).toContain('grandchild.vhd')
expect(dataSurvivors).toContain('cleanAncestor.vhd')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(4) // the 3 ok + data
expect(dataSurvivors.length).toEqual(3) // the 3 remaining data files
} else {
assert.equal(survivors.includes('clean.vhd'), true)
assert.equal(survivors.includes('child.vhd'), true)
assert.equal(survivors.includes('grandchild.vhd'), true)
assert.equal(survivors.length, 3)
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
expect(survivors).toContain('grandchild.vhd')
expect(survivors.length).toEqual(3)
}
})
}
@@ -415,9 +418,9 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm(rootPath, { remove: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true })
assert.deepEqual(await handler.list(basePath), [])
expect(await handler.list(basePath)).toEqual([])
})
test('check Aliases should work alone', async () => {
@@ -430,16 +433,12 @@ test('check Aliases should work alone', async () => {
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
// only the ok alias has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
assert.equal(alias.length, 1)
expect(alias.length).toEqual(1)
const data = await handler.list('vhds/data')
assert.equal(data.length, 1)
expect(data.length).toEqual(1)
})

View File

@@ -1,27 +1,22 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
// we only check size of all the vhd are VhdFiles
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
function shouldComputeVhdsSize(vhds) {
return vhds.every(vhd => vhd instanceof VhdFile)
}
@@ -29,49 +24,73 @@ const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(handler, vhds)) {
if (shouldComputeVhdsSize(vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
// chain is [ ancestor, child_1, ..., child_n ]
// 1. Create a VhdSynthetic from all children
// 2. Merge the VhdSynthetic into the ancestor
// 3. Delete all (now) unused VHDs
// 4. Rename the ancestor with the merged data to the latest child
//
//                VhdSynthetic
//                     |
//     /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ..., child n-1, childn ]
//      |      \___________________/      ^
//      |                |                |
//      |           unused VHDs           |
//      |                                 |
//      \_____________rename______________/
async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
assert(chain.length >= 2)
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
if (merge) {
logInfo(`merging VHD chain`, { chain })
logInfo(`merging children into parent`, { childrenCount: children.length, parent })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total })
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
const mergedSize = await mergeVhd(handler, parent, handler, children, {
onProgress({ done: d, total: t }) {
done = d
total = t
},
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
logInfo(`the VHD child is already merged`, { child })
if (remove) {
logInfo(`deleting merged VHD child`, { child })
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
}
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir, logWarn) => {
const listVhds = async (handler, vmDir) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
@@ -91,23 +110,12 @@ const listVhds = async (handler, vmDir, logWarn) => {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
await asyncMap(list, async file => {
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
} else {
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
}
})
}
@@ -123,15 +131,15 @@ async function checkAliases(
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
logWarn('alias references non VHD target', { path, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
logInfo('removing alias and non VHD target', { path, target })
await handler.unlink(target)
await handler.unlink(alias)
await handler.unlink(path)
}
continue
}
@@ -144,13 +152,13 @@ async function checkAliases(
// error during dispose should not trigger a deletion
}
} catch (error) {
logWarn('missing or broken alias target', { alias, target, error })
logWarn('missing or broken alias target', { target, path, error })
if (remove) {
try {
await VhdAbstract.unlink(handler, alias)
await VhdAbstract.unlink(handler, path)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { alias, target, error })
logWarn('error deleting alias target', { target, path, error })
}
}
}
@@ -160,17 +168,17 @@ async function checkAliases(
aliasFound.push(resolve('/', target))
}
const vhds = await handler.list(targetDataRepository, {
const entries = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
logWarn('no alias references VHD', { entry })
if (remove) {
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
logInfo('deleting unaliased VHD')
await VhdAbstract.unlink(handler, entry)
}
}
})
@@ -182,26 +190,17 @@ const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhds, async path => {
@@ -219,31 +218,12 @@ exports.cleanVm = async function cleanVm(
}
vhdChildren[parent] = path
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
})
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
if (error?.code === 'ERR_ASSERTION' && remove) {
logInfo('deleting broken VHD', { path })
logInfo('deleting broken path', { path })
return VhdAbstract.unlink(handler, path)
}
}
@@ -252,7 +232,7 @@ exports.cleanVm = async function cleanVm(
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const { statePath } = interruptedVhds.get(interruptedVhd)
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
logWarn('orphan merge state', {
@@ -291,9 +271,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
logWarn('parent VHD is missing', { parent, child: vhdPath })
logWarn('parent VHD is missing', { parent, vhdPath })
if (remove) {
logInfo('deleting orphan VHD', { path: vhdPath })
logInfo('deleting orphan VHD', { vhdPath })
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -311,7 +291,6 @@ exports.cleanVm = async function cleanVm(
}
const jsons = new Set()
let mustInvalidateCache = false
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -345,7 +324,7 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
logWarn('failed to read backup metadata', { path: json, error })
logWarn('failed to read metadata file', { json, error })
jsons.delete(json)
return
}
@@ -356,11 +335,10 @@ exports.cleanVm = async function cleanVm(
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
logWarn('metadata XVA is missing', { json })
if (remove) {
logInfo('deleting incomplete backup', { path: json })
logInfo('deleting incomplete backup', { json })
jsons.delete(json)
mustInvalidateCache = true
await handler.unlink(json)
}
}
@@ -380,10 +358,9 @@ exports.cleanVm = async function cleanVm(
vhdsToJSons[path] = json
})
} else {
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
logWarn('some metadata VHDs are missing', { json, missingVhds })
if (remove) {
logInfo('deleting incomplete backup', { path: json })
mustInvalidateCache = true
logInfo('deleting incomplete backup', { json })
jsons.delete(json)
await handler.unlink(json)
}
@@ -395,7 +372,7 @@ exports.cleanVm = async function cleanVm(
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -419,14 +396,14 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.unshift(vhd)
chain.push(vhd)
return chain
}
}
logWarn('unused VHD', { path: vhd })
logWarn('unused VHD', { vhd })
if (remove) {
logInfo('deleting unused VHD', { path: vhd })
logInfo('deleting unused VHD', { vhd })
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -437,13 +414,7 @@ exports.cleanVm = async function cleanVm(
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
}
Object.values(vhdChainsToMerge).forEach(chain => {
@@ -456,15 +427,9 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
const metadataPath = vhdsToJSons[chain[0]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
@@ -507,11 +472,7 @@ exports.cleanVm = async function cleanVm(
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
fileSystemSize = await handler.getSize(linkedXva)
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -523,15 +484,11 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
}
}
} catch (error) {
logWarn('failed to get backup size', { backup: metadataPath, error })
logWarn('failed to get metadata size', { metadataPath, error })
return
}
@@ -541,16 +498,11 @@ exports.cleanVm = async function cleanVm(
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
logWarn('metadata size update failed', { metadataPath, error })
}
}
})
// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
await handler.unlink(vmDir + '/cache.json.gz')
}
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

View File

@@ -1,12 +1,12 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

View File

@@ -3,8 +3,6 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
@@ -13,23 +11,18 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: stream.forks })
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
proxy.destroy(error)
}
})
eos(proxy, error => {
debug('end of stream, unpiping', { error, forks: --stream.forks })
eos(proxy, _ => {
stream.forks--
stream.unpipe(proxy)
if (stream.forks === 0) {
debug('no more forks, destroying original stream')
stream.destroy(new Error('no more consumers for this stream'))
}
})
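A minimal sketch of how such forks are consumed (file names are hypothetical):

```js
// fan one source stream out to two destinations; each fork is a PassThrough
// that unpipes itself on end/error instead of destroying the source
const { createReadStream, createWriteStream } = require('fs')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')

const source = createReadStream('backup.xva')
forkStreamUnpipe(source).pipe(createWriteStream('copy-1.xva'))
forkStreamUnpipe(source).pipe(createWriteStream('copy-2.xva'))
// once every fork has ended, the source itself is destroyed
```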

View File

@@ -49,11 +49,6 @@ const isValidTar = async (handler, size, fd) => {
// TODO: find an heuristic for compressed files
async function isValidXva(path) {
const handler = this._handler
// size is longer when encrypted + reading part of an encrypted file is not implemented
if (handler.isEncrypted) {
return true
}
try {
const fd = await handler.openFile(path, 'r')
try {
@@ -71,6 +66,7 @@ async function isValidXva(path) {
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}

View File

@@ -14,14 +14,12 @@
## File structure on remote
### with vhd files
```
<remote>
└─ xo-vm-backups
   ├─ index.json // TODO
   └─ <VM UUID>
      ├─ cache.json.gz
      ├─ index.json // TODO
      ├─ vdis
      │  └─ <job UUID>
      │     └─ <VDI UUID>
@@ -32,31 +30,6 @@
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
### with vhd directories
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
```
<vdis>/<job UUID>/<VDI UUID>
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
└─ data
   ├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
   └─ <uuid>.vhd
```
## Cache for a VM
In a VM directory, if the file `cache.json.gz` exists, it contains the metadata for all the backups of this VM.
It is located at `xo-vm-backups/<VM UUID>/cache.json.gz`.
This cache is gzip-compressed and contains a JSON object with the metadata of all the backups of this VM, indexed by their absolute path (i.e. `/xo-vm-backups/<VM UUID>/<timestamp>.json`).
This file is generated on demand when listing the backups, and directly updated on backup creation/deletion.
If any inconsistency is detected, the file is deleted so it will be fully regenerated when required.
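A minimal sketch of reading this cache, following the handler API used by `RemoteAdapter` (the function name and error handling are illustrative):

```js
const { fromCallback } = require('promise-toolbox')
const zlib = require('zlib')

// returns undefined when the cache is missing, so callers can regenerate it
async function readVmBackupsCache(handler, vmUuid) {
  const path = `xo-vm-backups/${vmUuid}/cache.json.gz`
  try {
    return JSON.parse(await fromCallback(zlib.gunzip, await handler.readFile(path)))
  } catch (error) {
    if (error.code !== 'ENOENT') {
      throw error
    }
  }
}
```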
## Attributes
### Of created snapshots

View File

@@ -64,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {

View File

@@ -8,28 +8,24 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.29.0",
"version": "0.25.0",
"engines": {
"node": ">=14.6"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "node--test"
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.2",
"@vates/fuse-vhd": "^1.0.0",
"@vates/nbd-client": "*",
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.2.0",
"@xen-orchestra/log": "^0.4.0",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^5.0.1",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
@@ -41,18 +37,16 @@
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^9.0.0",
"vhd-lib": "^4.1.1",
"uuid": "^8.3.2",
"vhd-lib": "^3.2.0",
"yazl": "^2.5.1"
},
"devDependencies": {
"rimraf": "^3.0.2",
"sinon": "^14.0.1",
"test": "^3.2.1",
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.5.2"
"@xen-orchestra/xapi": "^1.2.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -19,9 +19,10 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const NbdClient = require('@vates/nbd-client')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { debug, warn } = createLogger('xo:backups:DeltaBackupWriter')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
async checkBaseVdis(baseUuidToSrcVdi) {
@@ -37,7 +38,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
@@ -71,6 +71,35 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
return this._cleanVm({ merge: true })
}
healthCheck(sr) {
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -160,6 +189,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataContent = {
jobId,
mode: job.mode,
@@ -200,30 +230,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
if (!this._backup.config.useNbd) {
// get nbd if possible
try {
// this will always take the first host in the list
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
debug(`got nbd connection `, { vdi: vdi.uuid })
} catch (error) {
nbdClient = undefined
debug(`can't connect to nbd server or no server available`, { error })
}
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
@@ -243,7 +254,9 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
})
metadataContent.size = size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
// TODO: run cleanup?
}

View File

@@ -34,6 +34,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -49,6 +50,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const dataBasename = basename + '.xva'
const dataFilename = backupDir + '/' + dataBasename
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,
@@ -72,7 +74,9 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
if (!deleteFirst) {
await deleteOldBackups()

View File

@@ -3,13 +3,10 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')
const assert = require('assert')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { Task } = require('../Task.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
@@ -39,7 +36,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
Task.warning(message, data)
},
lock: false,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
})
})
} catch (error) {
@@ -75,39 +71,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
}
healthCheck(sr) {
assert.notStrictEqual(
this._metadataFileName,
undefined,
'Metadata file name should be defined before making a healthcheck'
)
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}
}

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.2.2"
"xen-api": "^1.2.1"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,20 +1,24 @@
'use strict'
/* eslint-env jest */
const test = require('test')
const assert = require('assert').strict
const sinon = require('sinon')
'use strict'
const { createSchedule } = require('./')
const clock = sinon.useFakeTimers()
jest.useFakeTimers()
const wrap = value => () => value
test('issues', async t => {
describe('issues', () => {
let originalDateNow
originalDateNow = Date.now
beforeAll(() => {
originalDateNow = Date.now
})
afterAll(() => {
Date.now = originalDateNow
originalDateNow = undefined
})
await t.test('stop during async execution', async () => {
test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise
@@ -31,20 +35,20 @@ test('issues', async t => {
job.start()
Date.now = wrap(+schedule.next(1)[0])
clock.runAll()
jest.runAllTimers()
assert.strictEqual(nCalls, 1)
expect(nCalls).toBe(1)
job.stop()
resolve()
await promise
clock.runAll()
assert.strictEqual(nCalls, 1)
jest.runAllTimers()
expect(nCalls).toBe(1)
})
await t.test('stop then start during async job execution', async () => {
test('stop then start during async job execution', async () => {
let nCalls = 0
let resolve, promise
@@ -61,9 +65,9 @@ test('issues', async t => {
job.start()
Date.now = wrap(+schedule.next(1)[0])
clock.runAll()
jest.runAllTimers()
assert.strictEqual(nCalls, 1)
expect(nCalls).toBe(1)
job.stop()
job.start()
@@ -72,10 +76,7 @@ test('issues', async t => {
await promise
Date.now = wrap(+schedule.next(1)[0])
clock.runAll()
assert.strictEqual(nCalls, 2)
jest.runAllTimers()
expect(nCalls).toBe(2)
})
Date.now = originalDateNow
originalDateNow = undefined
})

View File

@@ -1,7 +1,6 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const assert = require('assert').strict
'use strict'
const mapValues = require('lodash/mapValues')
const moment = require('moment-timezone')
@@ -26,24 +25,24 @@ describe('next()', () => {
},
([pattern, result], title) =>
it(title, () => {
assert.strictEqual(N(pattern), result)
expect(N(pattern)).toBe(result)
})
)
it('select first between month-day and week-day', () => {
assert.strictEqual(N('* * 10 * wen'), '2018-04-10T00:00')
assert.strictEqual(N('* * 12 * wen'), '2018-04-11T00:00')
expect(N('* * 10 * wen')).toBe('2018-04-10T00:00')
expect(N('* * 12 * wen')).toBe('2018-04-11T00:00')
})
it('select the last available day of a month', () => {
assert.strictEqual(N('* * 29 feb *'), '2020-02-29T00:00')
expect(N('* * 29 feb *')).toBe('2020-02-29T00:00')
})
it('fails when no solutions has been found', () => {
assert.throws(() => N('0 0 30 feb *'), { message: 'no solutions found for this schedule' })
expect(() => N('0 0 30 feb *')).toThrow('no solutions found for this schedule')
})
it('select the first sunday of the month', () => {
assert.strictEqual(N('* * * * 0', '2018-03-31T00:00'), '2018-04-01T00:00')
expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
})
})

View File

@@ -38,11 +38,6 @@
"moment-timezone": "^0.5.14"
},
"scripts": {
"postversion": "npm publish",
"test": "node--test"
},
"devDependencies": {
"sinon": "^14.0.1",
"test": "^3.2.1"
"postversion": "npm publish"
}
}

View File

@@ -0,0 +1,49 @@
/* eslint-env jest */
'use strict'
const parse = require('./parse')
describe('parse()', () => {
it('works', () => {
expect(parse('0 0-10 */10 jan,2,4-11/3 *')).toEqual({
minute: [0],
hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dayOfMonth: [1, 11, 21, 31],
month: [0, 2, 4, 7, 10],
})
})
it('correctly parse months', () => {
expect(parse('* * * 0,11 *')).toEqual({
month: [0, 11],
})
expect(parse('* * * jan,dec *')).toEqual({
month: [0, 11],
})
})
it('correctly parse days', () => {
expect(parse('* * * * mon,sun')).toEqual({
dayOfWeek: [0, 1],
})
})
it('reports missing integer', () => {
expect(() => parse('*/a')).toThrow('minute: missing integer at character 2')
expect(() => parse('*')).toThrow('hour: missing integer at character 1')
})
it('reports invalid aliases', () => {
expect(() => parse('* * * jan-foo *')).toThrow('month: missing alias or integer at character 10')
})
it('dayOfWeek: 0 and 7 bind to sunday', () => {
expect(parse('* * * * 0')).toEqual({
dayOfWeek: [0],
})
expect(parse('* * * * 7')).toEqual({
dayOfWeek: [0],
})
})
})
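For reference, a quick decoding of the first pattern above: `*/10` on day-of-month yields 1, 11, 21, 31 (steps of 10 starting from 1); `jan,2,4-11/3` on month yields 0 (jan), 2, then 4, 7, 10 (the range 4-11 in steps of 3); `0-10` on hour yields 0 through 10.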

View File

@@ -1,50 +0,0 @@
'use strict'
const { describe, it } = require('test')
const assert = require('assert').strict
const parse = require('./parse')
describe('parse()', () => {
it('works', () => {
assert.deepStrictEqual(parse('0 0-10 */10 jan,2,4-11/3 *'), {
minute: [0],
hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dayOfMonth: [1, 11, 21, 31],
month: [0, 2, 4, 7, 10],
})
})
it('correctly parse months', () => {
assert.deepStrictEqual(parse('* * * 0,11 *'), {
month: [0, 11],
})
assert.deepStrictEqual(parse('* * * jan,dec *'), {
month: [0, 11],
})
})
it('correctly parse days', () => {
assert.deepStrictEqual(parse('* * * * mon,sun'), {
dayOfWeek: [0, 1],
})
})
it('reports missing integer', () => {
assert.throws(() => parse('*/a'), { message: 'minute: missing integer at character 2' })
assert.throws(() => parse('*'), { message: 'hour: missing integer at character 1' })
})
it('reports invalid aliases', () => {
assert.throws(() => parse('* * * jan-foo *'), { message: 'month: missing alias or integer at character 10' })
})
it('dayOfWeek: 0 and 7 bind to sunday', () => {
assert.deepStrictEqual(parse('* * * * 0'), {
dayOfWeek: [0],
})
assert.deepStrictEqual(parse('* * * * 7'), {
dayOfWeek: [0],
})
})
})

View File

@@ -1,19 +0,0 @@
## metadata files
- Older remotes don't have any metadata file
- Remotes used since 5.75 have two files: `encryption.json` and `metadata.json`
The metadata files are checked by the `sync()` method. If the check fails, it MUST throw an error and unmount the remote.
If the remote is empty, the `sync` method creates them.
### encryption.json
A non-encrypted file containing the algorithm and parameters used for this remote.
It MUST NOT contain the key.
### metadata.json
An encrypted JSON file containing the settings of a remote. Today it is a nearly empty JSON file (`{random: <randomuuid>}`): it serves to check that the encryption key set on the remote is valid, but in the future it will be able to store remote settings to ease disaster recovery.
If this file can't be read (decrypted, decompressed, ...), that means the remote settings have been updated. If the remote is empty, update the `encryption.json` and `metadata.json` files; otherwise raise an error.
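A minimal sketch of that sync-time check, in the spirit of the `_checkMetadata` implementation shown later in this diff; the `isRemoteEmpty` and `createMetadata` helpers are hypothetical:

async function checkMetadata(handler) {
  // encryption.json is stored in clear text
  const { algorithm } = JSON.parse(await handler._readFile('encryption.json', 'utf-8'))
  try {
    // metadata.json is encrypted: parsing it proves the key and algorithm are valid
    JSON.parse(await handler.readFile('metadata.json'))
  } catch (error) {
    // wrong key or algorithm: only an empty remote may be re-initialized
    if (await isRemoteEmpty(handler)) {
      return createMetadata(handler, algorithm)
    }
    throw error // the caller MUST unmount the remote
  }
}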

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "3.2.0",
"version": "1.0.3",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -17,20 +17,20 @@
"xo-fs": "./cli.js"
},
"engines": {
"node": ">=14.13"
"node": ">=14"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.54.0",
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@marsaud/smb2": "^0.18.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^1.0.0",
"@vates/async-each": "^0.1.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.4.0",
"@xen-orchestra/log": "^0.3.0",
"bind-property-descriptor": "^2.0.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",
@@ -40,9 +40,9 @@
"lodash": "^4.17.4",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"readable-stream": "^4.1.0",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",
"xo-remote-parser": "^0.9.2"
"xo-remote-parser": "^0.8.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -50,11 +50,11 @@
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -68,9 +68,5 @@
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"exports": {
".": "./dist/index.js",
"./path": "./dist/path.js"
}
}

View File

@@ -1,135 +0,0 @@
const { pipeline } = require('node:stream')
const { readChunk } = require('@vates/read-chunk')
const crypto = require('crypto')
export const DEFAULT_ENCRYPTION_ALGORITHM = 'aes-256-gcm'
export const UNENCRYPTED_ALGORITHM = 'none'
export function isLegacyEncryptionAlgorithm(algorithm) {
return algorithm !== UNENCRYPTED_ALGORITHM && algorithm !== DEFAULT_ENCRYPTION_ALGORITHM
}
function getEncryptor(algorithm = DEFAULT_ENCRYPTION_ALGORITHM, key) {
if (key === undefined) {
return {
id: 'NULL_ENCRYPTOR',
algorithm: 'none',
key: 'none',
ivLength: 0,
encryptData: buffer => buffer,
encryptStream: stream => stream,
decryptData: buffer => buffer,
decryptStream: stream => stream,
}
}
const info = crypto.getCipherInfo(algorithm, { keyLength: key.length })
if (info === undefined) {
const error = new Error(
`Either the algorithm ${algorithm} is not available, or the key length ${
key.length
} is incorrect. Supported algorithms are ${crypto.getCiphers()}`
)
error.code = 'BAD_ALGORITHM'
throw error
}
const { ivLength, mode } = info
const authTagLength = ['gcm', 'ccm', 'ocb'].includes(mode) ? 16 : 0
function encryptStream(input) {
return pipeline(
input,
async function* (source) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
yield iv
for await (const data of source) {
yield cipher.update(data)
}
yield cipher.final()
// must write the auth tag at the end of the encryption stream
if (authTagLength > 0) {
yield cipher.getAuthTag()
}
},
() => {}
)
}
function decryptStream(encryptedStream) {
return pipeline(
encryptedStream,
async function* (source) {
/**
* WARNING
*
* the encrypted data contains an initialization vector, possibly an auth tag, and padding at the end
* we can't predict the decrypted size from the encrypted size alone
* thus, we can't set decrypted.length reliably
*
*/
const iv = await readChunk(source, ivLength)
const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
let authTag = Buffer.alloc(0)
for await (const data of source) {
if (data.length >= authTagLength) {
// fast path, no buffer concat
yield cipher.update(authTag)
authTag = data.slice(data.length - authTagLength)
yield cipher.update(data.slice(0, data.length - authTagLength))
} else {
// slower since there is a concat
const fullData = Buffer.concat([authTag, data])
const fullDataLength = fullData.length
if (fullDataLength > authTagLength) {
authTag = fullData.slice(fullDataLength - authTagLength)
yield cipher.update(fullData.slice(0, fullDataLength - authTagLength))
} else {
authTag = fullData
}
}
}
if (authTagLength > 0) {
cipher.setAuthTag(authTag)
}
yield cipher.final()
},
() => {}
)
}
function encryptData(buffer) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = cipher.update(buffer)
return Buffer.concat([iv, encrypted, cipher.final(), authTagLength > 0 ? cipher.getAuthTag() : Buffer.alloc(0)])
}
function decryptData(buffer) {
const iv = buffer.slice(0, ivLength)
const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
let encrypted
if (authTagLength > 0) {
const authTag = buffer.slice(buffer.length - authTagLength)
decipher.setAuthTag(authTag)
encrypted = buffer.slice(ivLength, buffer.length - authTagLength)
} else {
encrypted = buffer.slice(ivLength)
}
const decrypted = decipher.update(encrypted)
return Buffer.concat([decrypted, decipher.final()])
}
return {
id: algorithm,
algorithm,
key,
ivLength,
encryptData,
encryptStream,
decryptData,
decryptStream,
}
}
exports._getEncryptor = getEncryptor
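A minimal buffer round-trip with the encryptor above, assuming a CommonJS require of this module; aes-256-gcm requires a 32-byte key:

const crypto = require('crypto')
const { _getEncryptor } = require('./_encryptor')

const encryptor = _getEncryptor('aes-256-gcm', crypto.randomBytes(32))
const clear = Buffer.from('some backup data')
const encrypted = encryptor.encryptData(clear) // IV + ciphertext + auth tag
console.log(encryptor.decryptData(encrypted).equals(clear)) // true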

View File

@@ -1,50 +0,0 @@
/* eslint-env jest */
import { Readable } from 'node:stream'
import { _getEncryptor } from './_encryptor'
import crypto from 'crypto'
const algorithms = ['none', 'aes-256-cbc', 'aes-256-gcm']
function streamToBuffer(stream) {
return new Promise(resolve => {
const bufs = []
stream.on('data', function (d) {
bufs.push(d)
})
stream.on('end', function () {
resolve(Buffer.concat(bufs))
})
})
}
algorithms.forEach(algorithm => {
describe(`test algorithm ${algorithm}`, () => {
const key = algorithm === 'none' ? undefined : '73c1838d7d8a6088ca2317fb5f29cd91'
const encryptor = _getEncryptor(algorithm, key)
const buffer = crypto.randomBytes(1024 * 1024 + 1)
it('handle buffer', () => {
const encrypted = encryptor.encryptData(buffer)
if (algorithm !== 'none') {
expect(encrypted.equals(buffer)).toEqual(false) // encrypted should be different
// IV length, auth tag, padding
expect(encrypted.length).not.toEqual(buffer.length)
}
const decrypted = encryptor.decryptData(encrypted)
expect(decrypted.equals(buffer)).toEqual(true)
})
it('handle stream', async () => {
const stream = Readable.from(buffer)
stream.length = buffer.length
const encrypted = encryptor.encryptStream(stream)
if (algorithm !== 'none') {
expect(encrypted.length).toEqual(undefined)
}
const decrypted = encryptor.decryptStream(encrypted)
const decryptedBuffer = await streamToBuffer(decrypted)
expect(decryptedBuffer.equals(buffer)).toEqual(true)
})
})
})

View File

@@ -1,6 +1,6 @@
import path from 'path'
const { basename, dirname, join, resolve, relative, sep } = path.posix
const { basename, dirname, join, resolve, sep } = path.posix
export { basename, dirname, join }
@@ -19,6 +19,3 @@ export function split(path) {
return parts
}
export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)

View File

@@ -1,5 +1,4 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import assert from 'assert'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
@@ -7,14 +6,13 @@ import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
import { pipeline } from 'stream'
import { randomBytes, randomUUID } from 'crypto'
import { randomBytes } from 'crypto'
import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './path'
import { basename, dirname, normalize as normalizePath } from './_path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
import { DEFAULT_ENCRYPTION_ALGORITHM, _getEncryptor } from './_encryptor'
const { info, warn } = createLogger('@xen-orchestra:fs')
const { warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
@@ -25,9 +23,6 @@ const computeRate = (hrtime, size) => {
const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
const ENCRYPTION_DESC_FILENAME = 'encryption.json'
const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
throw error
@@ -68,15 +63,6 @@ class PrefixWrapper {
}
export default class RemoteHandlerAbstract {
#encryptor
get _encryptor() {
if (this.#encryptor === undefined) {
throw new Error(`Can't access to encryptor before remote synchronization`)
}
return this.#encryptor
}
constructor(remote, options = {}) {
if (remote.url === 'test://') {
this._remote = remote
@@ -125,51 +111,90 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (options.end !== undefined || options.start !== undefined) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
}
// TODO: remove method
async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
let stream = await timeout.call(
this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
dirMode,
flags: 'wx',
...options,
}),
this._timeout
)
// detect early errors
await fromEvent(stream, 'readable')
if (checksum) {
try {
const path = typeof file === 'string' ? file : file.path
const checksum = await this._readFile(checksumFile(path), { flags: 'r' })
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
} catch (error) {
if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
throw error
}
}
if (!checksum) {
return streamP
}
if (this.isEncrypted) {
stream = this._encryptor.decryptStream(stream)
} else {
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
try {
stream.length = await this._getSize(file)
} catch (error) {
// ignore errors
}
}
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
return stream
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout
.call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
.then(stream => {
// detect early errors
let promise = fromEvent(stream, 'readable')
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
promise = Promise.all([
promise,
ignoreErrors.call(
this._getSize(file).then(size => {
stream.length = size
})
),
])
}
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
// avoid an unhandled rejection warning
ignoreErrors.call(streamP)
return this._readFile(checksumFile(path), { flags: 'r' }).then(
checksum =>
streamP.then(stream => {
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
)
}
/**
@@ -185,8 +210,6 @@ export default class RemoteHandlerAbstract {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
input = this._encryptor.encryptStream(input)
if (checksum) {
checksumStream = createChecksumStream()
pipeline(input, checksumStream, noop)
@@ -197,8 +220,6 @@ export default class RemoteHandlerAbstract {
validator,
})
if (checksum) {
// using _outputFile means the checksum will NOT be encrypted
// it is by design to allow checking of encrypted files without the key
await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
}
}
@@ -218,13 +239,8 @@ export default class RemoteHandlerAbstract {
return timeout.call(this._getInfo(), this._timeout)
}
// when using encryption, the file size is aligned with the encryption block size (16 bytes)
// that means the stored size will be 1 to 16 bytes more than the content size, plus the initialization vector length (16 bytes)
async getSize(file) {
assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
return size - this._encryptor.ivLength
return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
}
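// A worked example of that alignment, assuming aes-256-cbc with PKCS#7
// padding and a 16-byte IV (hypothetical numbers for illustration):
//   content size        = 100 bytes
//   padded ciphertext   = 112 bytes (next multiple of 16)
//   file on the remote  = 16 (IV) + 112 = 128 bytes
//   getSize()           = 128 - 16 = 112, i.e. up to 16 bytes more than the content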
async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
@@ -270,39 +286,26 @@ export default class RemoteHandlerAbstract {
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
const encryptedData = this._encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
await this._outputFile(normalizePath(file), data, { dirMode, flags })
}
async read(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async readFile(file, { flags = 'r' } = {}) {
const data = await this._readFile(normalizePath(file), { flags })
return this._encryptor.decryptData(data)
return this._readFile(normalizePath(file), { flags })
}
async #rename(oldPath, newPath, { checksum }, createTree = true) {
try {
let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
}
await p
} catch (error) {
// ENOENT can be a missing target directory OR a missing source
if (error.code === 'ENOENT' && createTree) {
await this._mktree(dirname(newPath))
return this.#rename(oldPath, newPath, { checksum }, false)
}
throw error
async rename(oldPath, newPath, { checksum = false } = {}) {
oldPath = normalizePath(oldPath)
newPath = normalizePath(newPath)
let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
}
}
rename(oldPath, newPath, { checksum = false } = {}) {
return this.#rename(normalizePath(oldPath), normalizePath(newPath), { checksum })
return p
}
async copy(oldPath, newPath, { checksum = false } = {}) {
@@ -331,71 +334,6 @@ export default class RemoteHandlerAbstract {
@synchronized()
async sync() {
await this._sync()
try {
await this._checkMetadata()
} catch (error) {
await this._forget()
throw error
}
}
async _canWriteMetadata() {
const list = await this.list('/', {
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
})
return list.length === 0
}
async _createMetadata() {
const encryptionAlgorithm = this._remote.encryptionKey === undefined ? 'none' : DEFAULT_ENCRYPTION_ALGORITHM
this.#encryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
await Promise.all([
this._writeFile(normalizePath(ENCRYPTION_DESC_FILENAME), JSON.stringify({ algorithm: encryptionAlgorithm }), {
flags: 'w',
}), // not encrypted
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
])
}
async _checkMetadata() {
let encryptionAlgorithm = 'none'
let data
try {
// this file is not encrypted
data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME), 'utf-8')
const json = JSON.parse(data)
encryptionAlgorithm = json.algorithm
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
encryptionAlgorithm = this._remote.encryptionKey === undefined ? 'none' : DEFAULT_ENCRYPTION_ALGORITHM
}
try {
this.#encryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
// this file is encrypted
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME, 'utf-8')
JSON.parse(data)
} catch (error) {
// can be ENOENT, a bad algorithm, or broken JSON (bad key or algorithm)
if (encryptionAlgorithm !== 'none') {
if (await this._canWriteMetadata()) {
// any other error, but on an empty remote => update with the remote settings
info('will update metadata of this remote')
return this._createMetadata()
} else {
warn(
`The encryptionKey setting of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
{ error }
)
// will probably throw ERR_OSSL_EVP_BAD_DECRYPT if the key is incorrect
throw error
}
}
}
}
async test() {
@@ -449,13 +387,11 @@ export default class RemoteHandlerAbstract {
}
async write(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async writeFile(file, data, { flags = 'wx' } = {}) {
const encryptedData = this._encryptor.encryptData(data)
await this._writeFile(normalizePath(file), encryptedData, { flags })
await this._writeFile(normalizePath(file), data, { flags })
}
// Methods that can be called by private methods to avoid parallel limit on public methods
@@ -488,10 +424,6 @@ export default class RemoteHandlerAbstract {
// Methods that can be implemented by inheriting classes
useVhdDirectory() {
return this._remote.useVhdDirectory ?? false
}
async _closeFile(fd) {
throw new Error('Not implemented')
}
@@ -574,13 +506,9 @@ export default class RemoteHandlerAbstract {
async _outputStream(path, input, { dirMode, validator }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await timeout.call(
this._createOutputStream(tmpPath, {
dirMode,
flags: 'wx',
}),
this._timeout
)
const output = await this.createOutputStream(tmpPath, {
dirMode,
})
try {
await fromCallback(pipeline, input, output)
if (validator !== undefined) {
@@ -663,10 +591,6 @@ export default class RemoteHandlerAbstract {
async _writeFile(file, data, options) {
throw new Error('Not implemented')
}
get isEncrypted() {
return this._encryptor.id !== 'NULL_ENCRYPTOR'
}
}
function createPrefixWrapperMethods() {

View File

@@ -1,29 +1,21 @@
/* eslint-env jest */
import { DEFAULT_ENCRYPTION_ALGORITHM, _getEncryptor } from './_encryptor'
import { Disposable, pFromCallback, TimeoutError } from 'promise-toolbox'
import { getSyncedHandler } from '.'
import { TimeoutError } from 'promise-toolbox'
import AbstractHandler from './abstract'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
const TIMEOUT = 10e3
class TestHandler extends AbstractHandler {
constructor(impl) {
super({ url: 'test://' }, { timeout: TIMEOUT })
Object.defineProperty(this, 'isEncrypted', {
get: () => false, // encryption is tested separately
})
Object.keys(impl).forEach(method => {
this[`_${method}`] = impl[method]
})
}
}
const noop = Function.prototype
jest.useFakeTimers()
describe('closeFile()', () => {
@@ -38,6 +30,18 @@ describe('closeFile()', () => {
})
})
describe('createOutputStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createOutputStream: () => new Promise(() => {}),
})
const promise = testHandler.createOutputStream('File')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({
@@ -109,112 +113,3 @@ describe('rmdir()', () => {
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('encryption', () => {
let dir
beforeEach(async () => {
dir = await pFromCallback(cb => tmp.dir(cb))
})
afterAll(async () => {
await pFromCallback(cb => rimraf(dir, cb))
})
it('sync should NOT create metadata if missing (not encrypted)', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
expect(await fs.readdir(dir)).toEqual([])
})
it('sync should create metadata if missing (encrypted)', async () => {
await Disposable.use(
getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` }),
noop
)
expect(await fs.readdir(dir)).toEqual(['encryption.json', 'metadata.json'])
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
// encrypted, should not be parsable
await expect(async () => JSON.parse(await fs.readFile(`${dir}/metadata.json`))).rejects.toThrowError()
})
it('sync should not modify existing metadata', async () => {
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "none"}`)
await fs.writeFile(`${dir}/metadata.json`, `{"random": "NOTSORANDOM"}`)
await Disposable.use(await getSyncedHandler({ url: `file://${dir}` }), noop)
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual('none')
const metadata = JSON.parse(await fs.readFile(`${dir}/metadata.json`, 'utf-8'))
expect(metadata.random).toEqual('NOTSORANDOM')
})
it('should modify metadata if empty', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
// nothing created without encryption
await Disposable.use(
getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` }),
noop
)
let encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual('none')
})
it(
'sync should work with encrypted',
Disposable.wrap(async function* () {
const encryptor = _getEncryptor(DEFAULT_ENCRYPTION_ALGORITHM, '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "${DEFAULT_ENCRYPTION_ALGORITHM}"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
const handler = yield getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` })
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
const metadata = JSON.parse(await handler.readFile(`./metadata.json`))
expect(metadata.random).toEqual('NOTSORANDOM')
})
)
it('sync should fail when changing key on a non-empty remote', async () => {
const encryptor = _getEncryptor(DEFAULT_ENCRYPTION_ALGORITHM, '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "${DEFAULT_ENCRYPTION_ALGORITHM}"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
// different key but empty remote => ok
await Disposable.use(
getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` }),
noop
)
// remote is now non-empty: can't modify the key anymore
await fs.writeFile(`${dir}/nonempty.json`, 'content')
await expect(
Disposable.use(getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd10"` }), noop)
).rejects.toThrowError()
})
it('sync should fail when changing algorithm', async () => {
// encrypt with a non-default algorithm
const encryptor = _getEncryptor('aes-256-cbc', '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "aes-256-gmc"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
// remote is now non-empty: can't modify the key anymore
await fs.writeFile(`${dir}/nonempty.json`, 'content')
await expect(
Disposable.use(getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` }), noop)
).rejects.toThrowError()
})
})

View File

@@ -1,7 +1,10 @@
/* eslint-env jest */
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -24,6 +27,9 @@ const unsecureRandomBytes = n => {
const TEST_DATA_LEN = 1024
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
const createTestDataStream = asyncIteratorToStream(function* () {
yield TEST_DATA
})
const rejectionOf = p =>
p.then(
@@ -76,6 +82,14 @@ handlers.forEach(url => {
})
})
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {
@@ -228,17 +242,6 @@ handlers.forEach(url => {
expect(await handler.list('.')).toEqual(['file2'])
expect(await handler.readFile(`file2`)).toEqual(TEST_DATA)
})
it(`should rename the file and create dest directory`, async () => {
await handler.outputFile('file', TEST_DATA)
await handler.rename('file', `sub/file2`)
expect(await handler.list('sub')).toEqual(['file2'])
expect(await handler.readFile(`sub/file2`)).toEqual(TEST_DATA)
})
it(`should fail with enoent if source file is missing`, async () => {
const error = await rejectionOf(handler.rename('file', `sub/file2`))
expect(error.code).toBe('ENOENT')
})
})
describe('#rmdir()', () => {

View File

@@ -5,7 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
export { DEFAULT_ENCRYPTION_ALGORITHM, UNENCRYPTED_ALGORITHM, isLegacyEncryptionAlgorithm } from './_encryptor'
import RemoteHandlerSmbMount from './smb-mount'
const HANDLERS = {
file: RemoteHandlerLocal,
@@ -15,8 +15,10 @@ const HANDLERS = {
try {
execa.sync('mount.cifs', ['-V'])
HANDLERS.smb = RemoteHandlerSmbMount
} catch (_) {
HANDLERS.smb = RemoteHandlerSmb
} catch (_) {}
}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]

View File

@@ -1,38 +1,13 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
const { info, warn } = createLogger('xo:fs:local')
// save current stack trace and add it to any rejected error
//
// This is especially useful when the resolution is separate from the initial
// call, which is often the case with RPC libs.
//
// There is a perf impact and it should be avoided in production.
async function addSyncStackTrace(fn, ...args) {
const stackContainer = new Error()
try {
return await fn.apply(this, args)
} catch (error) {
error.syncStack = stackContainer.stack
throw error
}
}
function dontAddSyncStackTrace(fn, ...args) {
return fn.apply(this, args)
}
export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote, opts = {}) {
super(remote)
this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
this._retriesOnEagain = {
delay: 1e3,
retries: 9,
@@ -55,17 +30,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {
return this._addSyncStackTrace(fs.close, fd)
return fs.close(fd)
}
async _copy(oldPath, newPath) {
return this._addSyncStackTrace(fs.copy, this._getFilePath(oldPath), this._getFilePath(newPath))
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)
await this._addSyncStackTrace(fromEvent, stream, 'open')
await fromEvent(stream, 'open')
return stream
}
return fs.createReadStream('', {
@@ -78,7 +53,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
async _createWriteStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createWriteStream(this._getFilePath(file), options)
await this._addSyncStackTrace(fromEvent, stream, 'open')
await fromEvent(stream, 'open')
return stream
}
return fs.createWriteStream('', {
@@ -104,98 +79,71 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _getSize(file) {
const stats = await this._addSyncStackTrace(fs.stat, this._getFilePath(typeof file === 'string' ? file : file.path))
const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
return stats.size
}
async _list(dir) {
return this._addSyncStackTrace(fs.readdir, this._getFilePath(dir))
return fs.readdir(this._getFilePath(dir))
}
async _lock(path) {
const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
async onCompromised(error) {
warn('lock compromised', { error })
try {
release = await acquire()
info('compromised lock was reacquired')
} catch (error) {
warn('compromised lock could not be reacquired', { error })
}
},
})
let release = await this._addSyncStackTrace(acquire)
return async () => {
try {
await this._addSyncStackTrace(release)
} catch (error) {
warn('lock could not be released', { error })
}
}
_lock(path) {
return lockfile.lock(this._getFilePath(path))
}
_mkdir(dir, { mode }) {
return this._addSyncStackTrace(fs.mkdir, this._getFilePath(dir), { mode })
return fs.mkdir(this._getFilePath(dir), { mode })
}
async _openFile(path, flags) {
return this._addSyncStackTrace(fs.open, this._getFilePath(path), flags)
return fs.open(this._getFilePath(path), flags)
}
async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await this._addSyncStackTrace(fs.open, this._getFilePath(file), 'r') : file.fd
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
try {
return await this._addSyncStackTrace(
fs.read,
file,
buffer,
0,
buffer.length,
position === undefined ? null : position
)
return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
} finally {
if (needsClose) {
await this._addSyncStackTrace(fs.close, file)
await fs.close(file)
}
}
}
async _readFile(file, options) {
const filePath = this._getFilePath(file)
return await this._addSyncStackTrace(retry, () => fs.readFile(filePath, options), this._retriesOnEagain)
return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
}
async _rename(oldPath, newPath) {
return this._addSyncStackTrace(fs.rename, this._getFilePath(oldPath), this._getFilePath(newPath))
return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _rmdir(dir) {
return this._addSyncStackTrace(fs.rmdir, this._getFilePath(dir))
return fs.rmdir(this._getFilePath(dir))
}
async _sync() {
const path = this._getRealPath('/')
await this._addSyncStackTrace(fs.ensureDir, path)
await this._addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
await fs.ensureDir(path)
await fs.access(path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return this._addSyncStackTrace(fs.truncate, this._getFilePath(file), len)
return fs.truncate(this._getFilePath(file), len)
}
async _unlink(file) {
const filePath = this._getFilePath(file)
return await this._addSyncStackTrace(retry, () => fs.unlink(filePath), this._retriesOnEagain)
return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
}
_writeFd(file, buffer, position) {
return this._addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
return fs.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return this._addSyncStackTrace(fs.writeFile, this._getFilePath(file), data, { flag: flags })
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
}
}
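A minimal usage sketch of the sync-stack-trace pattern above (hypothetical `example` function; `addSyncStackTrace` is the helper defined in this diff):

const fs = require('fs-extra')

async function example() {
  try {
    // the stack is captured synchronously, before the async work starts
    await addSyncStackTrace(fs.readFile, '/does/not/exist')
  } catch (error) {
    console.log(error.syncStack) // points at the call site above
  }
}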

View File

@@ -27,7 +27,7 @@ import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './path'
import { basename, join, split } from './_path'
import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -155,14 +155,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
if (e.name === 'EntityTooLarge') {
return this._multipartCopy(oldPath, newPath)
}
// normalize this error code
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file or directory '${oldPath}'`)
error.cause = e
error.code = 'ENOENT'
error.path = oldPath
throw error
}
throw e
}
}
@@ -533,8 +525,4 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {}
useVhdDirectory() {
return true
}
}

View File

@@ -0,0 +1,23 @@
import { parse } from 'xo-remote-parser'
import MountHandler from './_mount'
import { normalize } from './_path'
export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}
get type() {
return 'smb'
}
}

View File

@@ -1,23 +1,163 @@
import { parse } from 'xo-remote-parser'
import Smb2 from '@marsaud/smb2'
import MountHandler from './_mount'
import { normalize } from './path'
import RemoteHandlerAbstract from './abstract'
export default class SmbHandler extends MountHandler {
// Normalize SMB status codes to Node.js-style error codes.
const wrapError = (error, code) => ({
__proto__: error,
cause: error,
code,
})
const normalizeError = (error, shouldBeDirectory) => {
const { code } = error
throw code === 'STATUS_DIRECTORY_NOT_EMPTY'
? wrapError(error, 'ENOTEMPTY')
: code === 'STATUS_FILE_IS_A_DIRECTORY'
? wrapError(error, 'EISDIR')
: code === 'STATUS_NOT_A_DIRECTORY'
? wrapError(error, 'ENOTDIR')
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
? wrapError(error, 'ENOENT')
: code === 'STATUS_OBJECT_NAME_COLLISION'
? wrapError(error, 'EEXIST')
: code === 'STATUS_NOT_SUPPORTED' || code === 'STATUS_INVALID_PARAMETER'
? wrapError(error, shouldBeDirectory ? 'ENOTDIR' : 'EISDIR')
: error
}
const normalizeDirError = error => normalizeError(error, true)
export default class SmbHandler extends RemoteHandlerAbstract {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
super(remote, opts)
// defined in _sync()
this._client = undefined
const prefix = this._remote.path
this._prefix = prefix !== '' ? prefix + '\\' : prefix
}
get type() {
return 'smb'
}
_getFilePath(file) {
return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
}
_dirname(file) {
const parts = file.split('\\')
parts.pop()
return parts.join('\\')
}
_closeFile(file) {
return this._client.close(file).catch(normalizeError)
}
_createReadStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createReadStream(file, options).catch(normalizeError)
}
_createWriteStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createWriteStream(file, options).catch(normalizeError)
}
_forget() {
const client = this._client
this._client = undefined
return client.disconnect()
}
_getSize(file) {
return this._client.getSize(this._getFilePath(file)).catch(normalizeError)
}
_list(dir) {
return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_mkdir(dir, { mode }) {
return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
}
// TODO: add flags
_openFile(path, flags) {
return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
}
async _read(file, buffer, position) {
const client = this._client
const needsClose = typeof file === 'string'
file = needsClose ? await client.open(this._getFilePath(file)) : file.fd
try {
return await client.read(file, buffer, 0, buffer.length, position)
} catch (error) {
normalizeError(error)
} finally {
if (needsClose) {
await client.close(file)
}
}
}
_readFile(file, options) {
return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
}
_rename(oldPath, newPath) {
return this._client
.rename(this._getFilePath(oldPath), this._getFilePath(newPath), {
replace: true,
})
.catch(normalizeError)
}
_rmdir(dir) {
return this._client.rmdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_sync() {
const remote = this._remote
this._client = new Smb2({
share: `\\\\${remote.host}`,
domain: remote.domain,
username: remote.username,
password: remote.password,
autoCloseTimeout: 0,
})
// Check access (smb2 does not expose a public connect method so far...)
return this.list('.')
}
_truncate(file, len) {
return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
}
_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}
_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, options) {
return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
}
}
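A minimal illustration of the `wrapError` pattern above: the wrapper delegates to the original error through the prototype chain, so `message` and `stack` are preserved while `code` is overridden:

const original = new Error('remote file is missing')
original.code = 'STATUS_OBJECT_NAME_NOT_FOUND'
const wrapped = { __proto__: original, cause: original, code: 'ENOENT' }
console.log(wrapped.code) // 'ENOENT'
console.log(wrapped.message) // 'remote file is missing'
console.log(wrapped instanceof Error) // true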

View File

@@ -1 +0,0 @@
VITE_XO_HOST=

View File

@@ -1,28 +0,0 @@
/* eslint-env node */
require("@rushstack/eslint-patch/modern-module-resolution");
module.exports = {
globals: {
XO_LITE_GIT_HEAD: true,
XO_LITE_VERSION: true,
},
root: true,
env: {
node: true,
},
extends: [
"plugin:vue/vue3-essential",
"eslint:recommended",
"@vue/eslint-config-typescript/recommended",
"@vue/eslint-config-prettier",
],
plugins: ["@limegrass/import-alias"],
rules: {
"@typescript-eslint/no-non-null-assertion": "off",
"@typescript-eslint/no-explicit-any": "off",
"@limegrass/import-alias/import-alias": [
"error",
{ aliasConfigPath: require("path").join(__dirname, "tsconfig.json") },
],
},
};

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,2 +0,0 @@
// Keeping this file to prevent applying the global monorepo config for now
module.exports = {};

View File

@@ -1,3 +0,0 @@
{
"recommendations": ["Vue.volar"]
}

Some files were not shown because too many files have changed in this diff.