Compare commits: lazy-mixin...xapi-typeg
154 commits
Commit SHA1s:
4b5d97f978, 8c14906a60, 62591e1f6f, ea4a888c5e, 281a1cc549, d52dcd0708, d8e01b2867, dca3f39156,
31e964fe0f, 39d973c43f, 55f921959d, 6598090662, d7f29e7363, 82df6089c3, 80cc66964e, 7883d38622,
2cb5169b6d, 9ad2c07984, a9c1239149, cb1223f72e, 4dc7575d5b, 276d1ce60a, 58ab32a623, c1846e6ff3,
826de17111, 8a09ea8bc1, 1297c925ad, 74d15e1a92, ae373c3e77, e9b90caa3a, b89e77a6a4, 61691ac46b,
512b96af24, d369593979, 2f38e0564b, 5e8dd4e4bc, 8f9f1f566d, d7870b8860, 97fa23f890, f839887da8,
15bfaa15ca, 4a3183ffa0, 18d03a076b, 4bed4195ac, a963878af5, d6c3dc87e0, 5391a9a5ad, b50e95802c,
75a9799e96, dbb9e4d60f, d27b6bd49d, c5d2726faa, a2a98c490f, e2dc1d98f1, 658c26d3c9, 612095789a,
7418d9f670, f344c58a62, 36b94f745d, 08cdcf4112, 76813737ef, 53d15d6a77, dd01b62b87, 9fab15537b,
d87db05b2b, f1f32c962c, ad149740b1, 9a4e938b91, a226760b07, a11450c3a7, e0cab4f937, 468250f291,
d04b93c17e, 911556a1aa, c7d3230eef, b63086bf09, a4118a5676, 26e7e6467c, 1c9552fa58, 9875cb5575,
d1c6bb8829, ef7005a291, 8068b83ffe, f01a89710c, 38ced81ada, 9834632d59, bb4504dd50, 8864c2f2db,
19208472e6, 10c77ba3cc, cd28fd4945, 6778d6aa4a, 433851d771, d157fd3528, 9150823c37, 07c3a44441,
051bbf9449, 22ea1c0e2a, 6432a44860, 493d861de3, 82452e9616, 2fbeaa618a, 6c08afaa0e, af4cc1f574,
2fb27b26cd, 11e09e1f87, 9ccb5f8aa9, af87d6a0ea, d847f45cb3, 38c615609a, 144cc4b82f, d24ab141e9,
8505374fcf, e53d961fc3, dc8ca7a8ee, 3d1b87d9dc, 01fa2af5cd, 20a89ca45a, 16ca2f8da9, 30fe9764ad,
e246c8ee47, ba03a48498, b96dd0160a, 49890a09b7, dfce56cee8, a6fee2946a, 34c849ee89, c7192ed3bf,
4d3dc0c5f7, 9ba4afa073, 3ea4422d13, de2e314f7d, 2380fb42fe, 95b76076a3, b415d4c34c, 2d82b6dd6e,
16b1935f12, 50ec614b2a, 9e11a0af6e, 0c3e42e0b9, 36b31bb0b3, c03c41450b, dfc2b5d88b, 87e3e3ffe3,
dae37c6a50, c7df11cc6f, 87f1f208c3, ba8c5d740e, c275d5d999, cfc53c9c94, 87df917157, 395d87d290,
aff8ec08ad, 4d40b56d85
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 1 change)
@@ -4,7 +4,6 @@ about: Suggest an idea for this project
 title: ''
 labels: ''
 assignees: ''

 ---

 **Is your feature request related to a problem? Please describe.**
.gitignore (vendored, 2 changes)
@@ -10,8 +10,6 @@
 /packages/*/dist/
 /packages/*/node_modules/

 /packages/vhd-cli/src/commands/index.js

-/packages/xen-api/examples/node_modules/
-/packages/xen-api/plot.dat
@@ -14,7 +14,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi

 `opts` is an object that can contains the following options:

-- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `1`
+- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
 - `signal`: an abort signal to stop the iteration
 - `stopOnError`: wether to stop iteration of first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
@@ -32,7 +32,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi

 `opts` is an object that can contains the following options:

-- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `1`
+- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
 - `signal`: an abort signal to stop the iteration
 - `stopOnError`: wether to stop iteration of first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
@@ -9,7 +9,16 @@ class AggregateError extends Error {
   }
 }

-exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
+/**
+ * @template Item
+ * @param {Iterable<Item>} iterable
+ * @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
+ * @returns {Promise<void>}
+ */
+exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
+  if (concurrency === 0) {
+    concurrency = Infinity
+  }
   return new Promise((resolve, reject) => {
     const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
     const errors = []
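Aside: a minimal usage sketch of the updated `asyncEach` API, pieced together from the README and the signature above; the item values and concurrency are illustrative, not from the diff.

```js
const { asyncEach } = require('@vates/async-each')

async function main() {
  const ac = new AbortController()

  // iteratee receives (item, index, iterable); with the new default
  // (concurrency = 10) small collections effectively run fully parallel,
  // and 0 now means "no limit"
  await asyncEach(
    ['vm-1', 'vm-2', 'vm-3'],
    async (item, index) => {
      console.log('processing', item, 'at index', index)
    },
    { concurrency: 2, signal: ac.signal, stopOnError: false }
  )
}

main().catch(console.error)
```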
@@ -36,7 +36,7 @@ describe('asyncEach', () => {
   it('works', async () => {
     const iteratee = jest.fn(async () => {})

-    await asyncEach.call(thisArg, iterable, iteratee)
+    await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })

     expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
     expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
@@ -66,7 +66,7 @@ describe('asyncEach', () => {
     }
   })

-  expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
+  expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
   expect(iteratee).toHaveBeenCalledTimes(2)
 })
@@ -91,7 +91,9 @@ describe('asyncEach', () => {
     }
   })

-  await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
+  await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
+    'asyncEach aborted'
+  )
   expect(iteratee).toHaveBeenCalledTimes(2)
  })
 })
@@ -24,7 +24,7 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "0.1.0",
+  "version": "1.0.0",
   "engines": {
     "node": ">=8.10"
   },
@@ -35,7 +35,7 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "scripts": {
     "postversion": "npm publish --access public",
     "test": "tap --branches=72"
@@ -19,7 +19,7 @@
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
-  "version": "0.1.2",
+  "version": "1.0.0",
   "engines": {
     "node": ">=8.10"
   },
@@ -26,7 +26,13 @@ module.exports = async function main(args) {
   await asyncMap(_, async vmDir => {
     vmDir = resolve(vmDir)
     try {
-      await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
+      await adapter.cleanVm(vmDir, {
+        fixMetadata: fix,
+        remove,
+        merge,
+        logInfo: (...args) => console.log(...args),
+        logWarn: (...args) => console.warn(...args),
+      })
     } catch (error) {
       console.error('adapter.cleanVm', vmDir, error)
     }
@@ -7,8 +7,8 @@
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "dependencies": {
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/backups": "^0.25.0",
-    "@xen-orchestra/fs": "^1.0.3",
+    "@xen-orchestra/backups": "^0.27.4",
+    "@xen-orchestra/fs": "^3.0.0",
     "filenamify": "^4.1.0",
     "getopts": "^2.2.5",
     "lodash": "^4.17.15",
@@ -27,7 +27,7 @@
   "scripts": {
     "postversion": "npm publish --access public"
   },
-  "version": "0.7.3",
+  "version": "0.7.7",
   "license": "AGPL-3.0-or-later",
   "author": {
     "name": "Vates SAS",
@@ -245,7 +245,7 @@ exports.Backup = class Backup {
         })
-      )
+      ),
-      () => settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined,
+      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
       async (srs, remoteAdapters, healthCheckSr) => {
         // remove adapters that failed (already handled)
         remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
@@ -15,7 +15,7 @@ const { deduped } = require('@vates/disposable/deduped.js')
 const { decorateMethodsWith } = require('@vates/decorate-with')
 const { compose } = require('@vates/compose')
 const { execFile } = require('child_process')
-const { readdir, stat } = require('fs-extra')
+const { readdir, lstat } = require('fs-extra')
 const { v4: uuidv4 } = require('uuid')
 const { ZipFile } = require('yazl')
 const zlib = require('zlib')
@@ -47,13 +47,12 @@ const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
 const RE_VHDI = /^vhdi(\d+)$/

 async function addDirectory(files, realPath, metadataPath) {
-  try {
-    const subFiles = await readdir(realPath)
-    await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
-  } catch (error) {
-    if (error == null || error.code !== 'ENOTDIR') {
-      throw error
-    }
+  const stats = await lstat(realPath)
+  if (stats.isDirectory()) {
+    await asyncMap(await readdir(realPath), file =>
+      addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
+    )
+  } else if (stats.isFile()) {
     files.push({
       realPath,
       metadataPath,
@@ -280,7 +279,7 @@ class RemoteAdapter {
     const dirs = new Set(files.map(file => dirname(file)))
     for (const dir of dirs) {
       // don't merge in main process, unused VHDs will be merged in the next backup run
-      await this.cleanVm(dir, { remove: true, onLog: warn })
+      await this.cleanVm(dir, { remove: true, logWarn: warn })
     }

     const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
@@ -292,7 +291,7 @@ class RemoteAdapter {
   }

   #useVhdDirectory() {
-    return this.handler.type === 's3'
+    return this.handler.useVhdDirectory()
   }

   #useAlias() {
@@ -383,8 +382,12 @@ class RemoteAdapter {
     const entriesMap = {}
     await asyncMap(await readdir(path), async name => {
       try {
-        const stats = await stat(`${path}/${name}`)
-        entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
+        const stats = await lstat(`${path}/${name}`)
+        if (stats.isDirectory()) {
+          entriesMap[name + '/'] = {}
+        } else if (stats.isFile()) {
+          entriesMap[name] = {}
+        }
       } catch (error) {
         if (error == null || error.code !== 'ENOENT') {
           throw error
@@ -3,8 +3,10 @@
 const CancelToken = require('promise-toolbox/CancelToken')
 const Zone = require('node-zone')

-const logAfterEnd = () => {
-  throw new Error('task has already ended')
+const logAfterEnd = log => {
+  const error = new Error('task has already ended')
+  error.log = log
+  throw error
 }

 const noop = Function.prototype
@@ -128,42 +128,49 @@ class VmBackup {
   }

   // calls fn for each function, warns of any errors, and throws only if there are no writers left
-  async _callWriters(fn, warnMessage, parallel = true) {
+  async _callWriters(fn, step, parallel = true) {
     const writers = this._writers
     const n = writers.size
     if (n === 0) {
       return
     }
-    if (n === 1) {
-      const [writer] = writers

+    async function callWriter(writer) {
+      const { name } = writer.constructor
       try {
+        debug('writer step starting', { step, writer: name })
         await fn(writer)
+        debug('writer step succeeded', { duration: step, writer: name })
       } catch (error) {
         writers.delete(writer)
+
+        warn('writer step failed', { error, step, writer: name })
+
+        // these two steps are the only one that are not already in their own sub tasks
+        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
+          Task.warning(
+            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
+          )
+        }
+
         throw error
       }
-      return
     }
+    if (n === 1) {
+      const [writer] = writers
+      return callWriter(writer)
+    }

     const errors = []
     await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
       try {
-        await fn(writer)
+        await callWriter(writer)
       } catch (error) {
         errors.push(error)
-        this.delete(writer)
-        warn(warnMessage, { error, writer: writer.constructor.name })
-
-        // these two steps are the only one that are not already in their own sub tasks
-        if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
-          Task.warning(
-            `the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
-          )
-        }
       }
     })
     if (writers.size === 0) {
-      throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
+      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
     }
   }
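Aside: the step names checked above suggest call sites of roughly this shape; this is a hedged reconstruction, the actual call sites are not shown in this diff.

```js
// the second argument is now a step label used for debug/warn logging
await this._callWriters(writer => writer.beforeBackup(), 'writer.beforeBackup()')
```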
@@ -35,7 +35,7 @@ afterEach(async () => {
 })

 const uniqueId = () => uuid.v1()
-const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
+const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))

 async function generateVhd(path, opts = {}) {
   let vhd
@@ -78,15 +78,15 @@ test('It remove broken vhd', async () => {
   await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
   expect((await handler.list(basePath)).length).toEqual(1)
   let loggued = ''
-  const onLog = message => {
+  const logInfo = message => {
     loggued += message
   }
-  await adapter.cleanVm('/', { remove: false, onLog })
-  expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
+  await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
+  expect(loggued).toEqual(`VHD check error`)
   // not removed
   expect((await handler.list(basePath)).length).toEqual(1)
   // really remove it
-  await adapter.cleanVm('/', { remove: true, onLog })
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
   expect((await handler.list(basePath)).length).toEqual(0)
 })
@@ -118,15 +118,13 @@ test('it remove vhd with missing or multiple ancestors', async () => {
   )
   // clean
   let loggued = ''
-  const onLog = message => {
+  const logInfo = message => {
     loggued += message + '\n'
   }
-  await adapter.cleanVm('/', { remove: true, onLog })
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })

   const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
   expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
   const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
   expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd

   // we don't test the filew on disk, since they will all be marker as unused and deleted without a metadata.json file
 })
@@ -159,14 +157,12 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
 })

   let loggued = ''
-  const onLog = message => {
+  const logInfo = message => {
     loggued += message + '\n'
   }
-  await adapter.cleanVm('/', { remove: true, onLog })
-  let matched = loggued.match(/deleting unused VHD /g) || []
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
+  let matched = loggued.match(/deleting unused VHD/g) || []
   expect(matched.length).toEqual(1) // only one vhd should have been deleted
   matched = loggued.match(/abandonned.vhd is unused/g) || []
   expect(matched.length).toEqual(1) // and it must be abandonned.vhd

   // a missing vhd cause clean to remove all vhds
   await handler.writeFile(
@@ -183,8 +179,8 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
     { flags: 'w' }
   )
   loggued = ''
-  await adapter.cleanVm('/', { remove: true, onLog })
-  matched = loggued.match(/deleting unused VHD /g) || []
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
+  matched = loggued.match(/deleting unused VHD/g) || []
   expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
 })
@@ -220,16 +216,16 @@ test('it merges delta of non destroyed chain', async () => {
   })

   let loggued = []
-  const onLog = message => {
+  const logInfo = message => {
     loggued.push(message)
   }
-  await adapter.cleanVm('/', { remove: true, onLog })
-  expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
+  await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
+  expect(loggued[0]).toEqual(`incorrect backup size in metadata`)

   loggued = []
-  await adapter.cleanVm('/', { remove: true, merge: true, onLog })
+  await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
   const [merging] = loggued
-  expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
+  expect(merging).toEqual(`merging VHD chain`)

   const metadata = JSON.parse(await handler.readFile(`metadata.json`))
   // size should be the size of children + grand children after the merge
@@ -275,7 +271,7 @@ test('it finish unterminated merge ', async () => {
     })
   )

-  await adapter.cleanVm('/', { remove: true, merge: true })
+  await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
   // merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)

   // only check deletion
@@ -382,7 +378,7 @@ describe('tests multiple combination ', () => {
     })
   )

-  await adapter.cleanVm('/', { remove: true, merge: true })
+  await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })

   const metadata = JSON.parse(await handler.readFile(`metadata.json`))
   // size should be the size of children + grand children + clean after the merge
@@ -418,7 +414,7 @@
 test('it cleans orphan merge states ', async () => {
   await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')

-  await adapter.cleanVm('/', { remove: true })
+  await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })

   expect(await handler.list(basePath)).toEqual([])
 })
@@ -433,7 +429,11 @@ test('check Aliases should work alone', async () => {

   await generateVhd(`vhds/data/missingalias.vhd`)

-  await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
+  await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
+    remove: true,
+    handler,
+    logWarn: () => {},
+  })

   // only ok have suvived
   const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
@@ -1,22 +1,27 @@
 'use strict'

 const assert = require('assert')
 const sum = require('lodash/sum')
+const UUID = require('uuid')
 const { asyncMap } = require('@xen-orchestra/async-map')
-const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
+const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
 const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
 const { dirname, resolve } = require('path')
 const { DISK_TYPES } = Constants
 const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
 const { limitConcurrency } = require('limit-concurrency-decorator')
+const { mergeVhdChain } = require('vhd-lib/merge')

 const { Task } = require('./Task.js')
 const { Disposable } = require('promise-toolbox')
+const handlerPath = require('@xen-orchestra/fs/path')

 // checking the size of a vhd directory is costly
 // 1 Http Query per 1000 blocks
 // we only check size of all the vhd are VhdFiles
-function shouldComputeVhdsSize(vhds) {
+function shouldComputeVhdsSize(handler, vhds) {
+  if (handler.isEncrypted) {
+    return false
+  }
   return vhds.every(vhd => vhd instanceof VhdFile)
 }
@@ -24,63 +29,48 @@ const computeVhdsSize = (handler, vhdPaths) =>
   Disposable.use(
     vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
     async vhds => {
-      if (shouldComputeVhdsSize(vhds)) {
+      if (shouldComputeVhdsSize(handler, vhds)) {
         const sizes = await asyncMap(vhds, vhd => vhd.getSize())
         return sum(sizes)
       }
     }
   )

-// chain is [ ancestor, child1, ..., childn]
-// 1. Create a VhdSynthetic from all children
-// 2. Merge the VhdSynthetic into the ancestor
-// 3. Delete all (now) unused VHDs
-// 4. Rename the ancestor with the merged data to the latest child
-//
-// VhdSynthetic
-//       |
-//  /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
-// [ ancestor, child1, ...,child n-1, childn ]
-//     |          \___________________/  ^
-//     |                    |            |
-//     |                unused VHDs      |
-//     |                                 |
-//     \___________rename_____________/
-
-async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
-  assert(chain.length >= 2)
-  const chainCopy = [...chain]
-  const parent = chainCopy.pop()
-  const children = chainCopy
-
+// chain is [ ancestor, child_1, ..., child_n ]
+async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) {
   if (merge) {
-    logInfo(`merging children into parent`, { childrenCount: children.length, parent })
+    logInfo(`merging VHD chain`, { chain })

     let done, total
     const handle = setInterval(() => {
       if (done !== undefined) {
-        logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total })
+        logInfo('merge in progress', {
+          done,
+          parent: chain[0],
+          progress: Math.round((100 * done) / total),
+          total,
+        })
       }
     }, 10e3)

-    const mergedSize = await mergeVhd(handler, parent, handler, children, {
-      logInfo,
-      onProgress({ done: d, total: t }) {
-        done = d
-        total = t
-      },
-      remove,
-    })
-
-    clearInterval(handle)
-    return mergedSize
+    try {
+      return await mergeVhdChain(handler, chain, {
+        logInfo,
+        onProgress({ done: d, total: t }) {
+          done = d
+          total = t
+        },
+        removeUnused: remove,
+      })
+    } finally {
+      clearInterval(handle)
+    }
   }
 }

 const noop = Function.prototype

 const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
-const listVhds = async (handler, vmDir) => {
+const listVhds = async (handler, vmDir, logWarn) => {
   const vhds = new Set()
   const aliases = {}
   const interruptedVhds = new Map()
@@ -100,12 +90,23 @@ const listVhds = async (handler, vmDir) => {
       filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
     })
     aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
-    list.forEach(file => {
+
+    await asyncMap(list, async file => {
       const res = INTERRUPTED_VHDS_REG.exec(file)
       if (res === null) {
         vhds.add(`${vdiDir}/${file}`)
       } else {
-        interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
+        try {
+          const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
+          interruptedVhds.set(`${vdiDir}/${res[1]}`, {
+            statePath: `${vdiDir}/${file}`,
+            chain: mergeState.chain,
+          })
+        } catch (error) {
+          // fall back to a non resuming merge
+          vhds.add(`${vdiDir}/${file}`)
+          logWarn('failed to read existing merge state', { path: file, error })
+        }
       }
     })
   }
@@ -121,15 +122,15 @@ async function checkAliases(
   { handler, logInfo = noop, logWarn = console.warn, remove = false }
 ) {
   const aliasFound = []
-  for (const path of aliasPaths) {
-    const target = await resolveVhdAlias(handler, path)
+  for (const alias of aliasPaths) {
+    const target = await resolveVhdAlias(handler, alias)

     if (!isVhdFile(target)) {
-      logWarn('alias references non VHD target', { path, target })
+      logWarn('alias references non VHD target', { alias, target })
       if (remove) {
-        logInfo('removing alias and non VHD target', { path, target })
+        logInfo('removing alias and non VHD target', { alias, target })
         await handler.unlink(target)
-        await handler.unlink(path)
+        await handler.unlink(alias)
       }
       continue
     }
@@ -142,13 +143,13 @@ async function checkAliases(
       // error during dispose should not trigger a deletion
     }
   } catch (error) {
-    logWarn('missing or broken alias target', { target, path, error })
+    logWarn('missing or broken alias target', { alias, target, error })
     if (remove) {
       try {
-        await VhdAbstract.unlink(handler, path)
+        await VhdAbstract.unlink(handler, alias)
       } catch (error) {
         if (error.code !== 'ENOENT') {
-          logWarn('error deleting alias target', { target, path, error })
+          logWarn('error deleting alias target', { alias, target, error })
         }
       }
     }
@@ -158,17 +159,17 @@ async function checkAliases(
     aliasFound.push(resolve('/', target))
   }

-  const entries = await handler.list(targetDataRepository, {
+  const vhds = await handler.list(targetDataRepository, {
     ignoreMissing: true,
     prependDir: true,
   })

-  entries.forEach(async entry => {
-    if (!aliasFound.includes(entry)) {
-      logWarn('no alias references VHD', { entry })
+  await asyncMap(vhds, async path => {
+    if (!aliasFound.includes(path)) {
+      logWarn('no alias references VHD', { path })
       if (remove) {
-        logInfo('deleting unaliased VHD')
-        await VhdAbstract.unlink(handler, entry)
+        logInfo('deleting unused VHD', { path })
+        await VhdAbstract.unlink(handler, path)
       }
     }
   })
@@ -182,15 +183,16 @@ exports.cleanVm = async function cleanVm(
   vmDir,
   { fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
 ) {
-  const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
+  const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)

   const handler = this._handler

   const vhdsToJSons = new Set()
+  const vhdById = new Map()
   const vhdParents = { __proto__: null }
   const vhdChildren = { __proto__: null }

-  const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
+  const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)

   // remove broken VHDs
   await asyncMap(vhds, async path => {
@@ -208,12 +210,31 @@ exports.cleanVm = async function cleanVm(
       }
       vhdChildren[parent] = path
     }
+      // Detect VHDs with the same UUIDs
+      //
+      // Due to a bug introduced in a1bcd35e2
+      const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
+      let vhdKept = vhd
+      if (duplicate !== undefined) {
+        logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
+        if (duplicate.containsAllDataOf(vhd)) {
+          logWarn(`should delete ${path}`)
+          vhdKept = duplicate
+          vhds.delete(path)
+        } else if (vhd.containsAllDataOf(duplicate)) {
+          logWarn(`should delete ${duplicate._path}`)
+          vhds.delete(duplicate._path)
+        } else {
+          logWarn('same ids but different content')
+        }
+      }
+      vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
     })
   } catch (error) {
     vhds.delete(path)
     logWarn('VHD check error', { path, error })
     if (error?.code === 'ERR_ASSERTION' && remove) {
-      logInfo('deleting broken path', { path })
+      logInfo('deleting broken VHD', { path })
       return VhdAbstract.unlink(handler, path)
     }
   }
@@ -222,7 +243,7 @@ exports.cleanVm = async function cleanVm(
   // remove interrupted merge states for missing VHDs
   for (const interruptedVhd of interruptedVhds.keys()) {
     if (!vhds.has(interruptedVhd)) {
-      const statePath = interruptedVhds.get(interruptedVhd)
+      const { statePath } = interruptedVhds.get(interruptedVhd)
       interruptedVhds.delete(interruptedVhd)

       logWarn('orphan merge state', {
@@ -261,9 +282,9 @@ exports.cleanVm = async function cleanVm(
     if (!vhds.has(parent)) {
       vhds.delete(vhdPath)

-      logWarn('parent VHD is missing', { parent, vhdPath })
+      logWarn('parent VHD is missing', { parent, child: vhdPath })
       if (remove) {
-        logInfo('deleting orphan VHD', { vhdPath })
+        logInfo('deleting orphan VHD', { path: vhdPath })
         deletions.push(VhdAbstract.unlink(handler, vhdPath))
       }
     }
@@ -314,7 +335,7 @@ exports.cleanVm = async function cleanVm(
     try {
       metadata = JSON.parse(await handler.readFile(json))
     } catch (error) {
-      logWarn('failed to read metadata file', { json, error })
+      logWarn('failed to read backup metadata', { path: json, error })
       jsons.delete(json)
       return
     }
@@ -325,9 +346,9 @@ exports.cleanVm = async function cleanVm(
     if (xvas.has(linkedXva)) {
       unusedXvas.delete(linkedXva)
     } else {
-      logWarn('metadata XVA is missing', { json })
+      logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
       if (remove) {
-        logInfo('deleting incomplete backup', { json })
+        logInfo('deleting incomplete backup', { path: json })
         jsons.delete(json)
         await handler.unlink(json)
       }
@@ -348,9 +369,9 @@ exports.cleanVm = async function cleanVm(
       vhdsToJSons[path] = json
     })
   } else {
-    logWarn('some metadata VHDs are missing', { json, missingVhds })
+    logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
     if (remove) {
-      logInfo('deleting incomplete backup', { json })
+      logInfo('deleting incomplete backup', { path: json })
       jsons.delete(json)
      await handler.unlink(json)
    }
@@ -362,7 +383,7 @@ exports.cleanVm = async function cleanVm(
   const unusedVhdsDeletion = []
   const toMerge = []
   {
-    // VHD chains (as list from child to ancestor) to merge indexed by last
+    // VHD chains (as list from oldest to most recent) to merge indexed by most recent
     // ancestor
     const vhdChainsToMerge = { __proto__: null }
@@ -386,14 +407,14 @@ exports.cleanVm = async function cleanVm(
     if (child !== undefined) {
       const chain = getUsedChildChainOrDelete(child)
       if (chain !== undefined) {
-        chain.push(vhd)
+        chain.unshift(vhd)
         return chain
       }
     }

-    logWarn('unused VHD', { vhd })
+    logWarn('unused VHD', { path: vhd })
     if (remove) {
-      logInfo('deleting unused VHD', { vhd })
+      logInfo('deleting unused VHD', { path: vhd })
       unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
     }
   }
@@ -404,7 +425,13 @@ exports.cleanVm = async function cleanVm(

     // merge interrupted VHDs
     for (const parent of interruptedVhds.keys()) {
-      vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
+      // before #6349 the chain wasn't in the mergeState
+      const { chain, statePath } = interruptedVhds.get(parent)
+      if (chain === undefined) {
+        vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
+      } else {
+        vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
+      }
     }

     Object.values(vhdChainsToMerge).forEach(chain => {
@@ -417,9 +444,9 @@ exports.cleanVm = async function cleanVm(
   const metadataWithMergedVhd = {}
   const doMerge = async () => {
     await asyncMap(toMerge, async chain => {
-      const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
+      const merged = await limitedMergeVhdChain(handler, chain, { logInfo, logWarn, remove, merge })
       if (merged !== undefined) {
-        const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
+        const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
         metadataWithMergedVhd[metadataPath] = true
       }
     })
@@ -462,7 +489,11 @@ exports.cleanVm = async function cleanVm(
     if (mode === 'full') {
       // a full backup : check size
       const linkedXva = resolve('/', vmDir, xva)
-      fileSystemSize = await handler.getSize(linkedXva)
+      try {
+        fileSystemSize = await handler.getSize(linkedXva)
+      } catch (error) {
+        // can fail with encrypted remote
+      }
     } else if (mode === 'delta') {
       const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
       fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -474,11 +505,15 @@ exports.cleanVm = async function cleanVm(

       // don't warn if the size has changed after a merge
       if (!merged && fileSystemSize !== size) {
-        logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
+        logWarn('incorrect backup size in metadata', {
+          path: metadataPath,
+          actual: size ?? 'none',
+          expected: fileSystemSize,
+        })
       }
     }
   } catch (error) {
-    logWarn('failed to get metadata size', { metadataPath, error })
+    logWarn('failed to get backup size', { backup: metadataPath, error })
     return
   }
@@ -488,7 +523,7 @@ exports.cleanVm = async function cleanVm(
     try {
       await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
     } catch (error) {
-      logWarn('metadata size update failed', { metadataPath, error })
+      logWarn('failed to update backup size in metadata', { path: metadataPath, error })
    }
  }
 })
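Aside: across these hunks the single `onLog(messageString)` callback is replaced by `logInfo`/`logWarn` called with a static message plus a structured payload. A hedged sketch of the new calling convention (the path is illustrative):

```js
await adapter.cleanVm('/xo-vm-backups/<vm uuid>', {
  remove: true,
  merge: true,
  // messages are now stable strings; details arrive in the second argument
  logInfo: (message, data) => console.log(message, data),
  logWarn: (message, data) => console.warn(message, data),
})
```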
@@ -3,6 +3,8 @@
 const eos = require('end-of-stream')
 const { PassThrough } = require('stream')

+const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
+
 // create a new readable stream from an existing one which may be piped later
 //
 // in case of error in the new readable stream, it will simply be unpiped
@@ -11,18 +13,23 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
   const { forks = 0 } = stream
   stream.forks = forks + 1

+  debug('forking', { forks: stream.forks })
+
   const proxy = new PassThrough()
   stream.pipe(proxy)
   eos(stream, error => {
     if (error !== undefined) {
+      debug('error on original stream, destroying fork', { error })
       proxy.destroy(error)
     }
   })
-  eos(proxy, _ => {
-    stream.forks--
+  eos(proxy, error => {
+    debug('end of stream, unpiping', { error, forks: --stream.forks })
+
     stream.unpipe(proxy)

     if (stream.forks === 0) {
+      debug('no more forks, destroying original stream')
       stream.destroy(new Error('no more consumers for this stream'))
     }
   })
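Aside: a hedged sketch of what `forkStreamUnpipe` enables — several consumers of one source, with the source destroyed once the last fork ends. The module path and file names are assumptions for the demo.

```js
const { createReadStream, createWriteStream } = require('fs')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')

const source = createReadStream('backup.xva')

// each fork is an independent PassThrough; an error in one fork only
// unpipes that fork, it does not kill the other consumer
forkStreamUnpipe(source).pipe(createWriteStream('/mnt/remote-a/backup.xva'))
forkStreamUnpipe(source).pipe(createWriteStream('/mnt/remote-b/backup.xva'))
```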
@@ -49,6 +49,11 @@ const isValidTar = async (handler, size, fd) => {
 // TODO: find an heuristic for compressed files
 async function isValidXva(path) {
   const handler = this._handler

+  // size is longer when encrypted + reading part of an encrypted file is not implemented
+  if (handler.isEncrypted) {
+    return true
+  }
   try {
     const fd = await handler.openFile(path, 'r')
     try {
@@ -66,7 +71,6 @@ async function isValidXva(path) {
     }
   } catch (error) {
     // never throw, log and report as valid to avoid side effects
     console.error('isValidXva', path, error)
     return true
   }
 }
@@ -14,6 +14,8 @@

 ## File structure on remote

+### with vhd files
+
 ```
 <remote>
 └─ xo-vm-backups
@@ -30,6 +32,19 @@
    └─ <YYYYMMDD>T<HHmmss>.xva.checksum
 ```

+### with vhd directories
+
+When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
+
+```
+<vdis>/<job UUID>/<VDI UUID>
+├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
+├─ <YYYYMMDD>T<HHmmss>.alias.vhd
+└─ data
+  ├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
+  └─ <uuid>.vhd
+```
+
 ## Attributes

 ### Of created snapshots
@@ -64,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
   try {
     const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
     try {
-      await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
+      await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
     } catch (error) {
       // consider the clean successful if the VM dir is missing
       if (error.code !== 'ENOENT') {
@@ -8,7 +8,7 @@
   "type": "git",
   "url": "https://github.com/vatesfr/xen-orchestra.git"
 },
-  "version": "0.25.0",
+  "version": "0.27.4",
   "engines": {
     "node": ">=14.6"
   },
@@ -22,7 +22,7 @@
   "@vates/disposable": "^0.1.1",
   "@vates/parse-duration": "^0.1.1",
   "@xen-orchestra/async-map": "^0.1.2",
-  "@xen-orchestra/fs": "^1.0.3",
+  "@xen-orchestra/fs": "^3.0.0",
   "@xen-orchestra/log": "^0.3.0",
   "@xen-orchestra/template": "^0.1.0",
   "compare-versions": "^4.0.1",
@@ -38,7 +38,7 @@
   "promise-toolbox": "^0.21.0",
   "proper-lockfile": "^4.1.2",
   "uuid": "^8.3.2",
-  "vhd-lib": "^3.2.0",
+  "vhd-lib": "^4.0.0",
   "yazl": "^2.5.1"
 },
 "devDependencies": {
@@ -46,7 +46,7 @@
   "tmp": "^0.2.1"
 },
 "peerDependencies": {
-  "@xen-orchestra/xapi": "^1.2.0"
+  "@xen-orchestra/xapi": "^1.4.2"
 },
 "license": "AGPL-3.0-or-later",
 "author": {
||||
@@ -18,7 +18,7 @@
|
||||
"preferGlobal": true,
|
||||
"dependencies": {
|
||||
"golike-defer": "^0.5.1",
|
||||
"xen-api": "^1.2.1"
|
||||
"xen-api": "^1.2.2"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
|
||||
@xen-orchestra/fs/docs/encryption.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+## metadata files
+
+- Older remotes dont have any metadata file
+- Remote used since 5.75 have two files : encryption.json and metadata.json
+
+The metadata files are checked by the sync() method. If the check fails it MUST throw an error and dismount.
+
+If the remote is empty, the `sync` method creates them
+
+### encryption.json
+
+A non encrypted file contain the algorithm and parameters used for this remote.
+This MUST NOT contains the key.
+
+### metadata.json
+
+An encrypted JSON file containing the settings of a remote. Today this is an empty JSON file ( `{random: <randomuuid>}` ), it serves to check if the encryption key set in the remote is valid, but in the future will be able to store some remote settings to ease disaster recovery.
+
+If this file can't be read (decrypted, decompressed, .. ), that means that the remote settings have been updated. If the remote is empty, update the `encryption.json` and `metadata.json` files , else raise an error.
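Aside: plausible on-disk shapes for these two files, inferred from `_createMetadata()` later in this diff (the UUID value is illustrative):

```
// encryption.json — stored in clear, never contains the key
{"algorithm":"aes-256-cbc"}

// metadata.json — stored encrypted; decrypting it successfully validates the key
{"random":"5f2b9c4e-0d4a-4e46-9c1b-7a1a0a2b3c4d"}
```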
@@ -1,7 +1,7 @@
 {
   "private": false,
   "name": "@xen-orchestra/fs",
-  "version": "1.0.3",
+  "version": "3.0.0",
   "license": "AGPL-3.0-or-later",
   "description": "The File System for Xen Orchestra backups.",
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -17,18 +17,18 @@
   "xo-fs": "./cli.js"
 },
 "engines": {
-  "node": ">=14"
+  "node": ">=14.13"
 },
 "dependencies": {
   "@aws-sdk/client-s3": "^3.54.0",
   "@aws-sdk/lib-storage": "^3.54.0",
   "@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
   "@aws-sdk/node-http-handler": "^3.54.0",
   "@marsaud/smb2": "^0.18.0",
   "@sindresorhus/df": "^3.1.1",
-  "@vates/async-each": "^0.1.0",
+  "@vates/async-each": "^1.0.0",
   "@vates/coalesce-calls": "^0.1.0",
   "@vates/decorate-with": "^2.0.0",
   "@vates/read-chunk": "^1.0.0",
   "@xen-orchestra/async-map": "^0.1.2",
   "@xen-orchestra/log": "^0.3.0",
   "bind-property-descriptor": "^2.0.0",
@@ -40,9 +40,10 @@
   "lodash": "^4.17.4",
   "promise-toolbox": "^0.21.0",
   "proper-lockfile": "^4.1.2",
-  "readable-stream": "^3.0.6",
+  "pumpify": "^2.0.1",
+  "readable-stream": "^4.1.0",
   "through2": "^4.0.2",
-  "xo-remote-parser": "^0.8.0"
+  "xo-remote-parser": "^0.9.1"
 },
 "devDependencies": {
   "@babel/cli": "^7.0.0",
@@ -50,7 +51,6 @@
   "@babel/plugin-proposal-decorators": "^7.1.6",
   "@babel/plugin-proposal-function-bind": "^7.0.0",
   "@babel/preset-env": "^7.8.0",
-  "async-iterator-to-stream": "^1.1.0",
   "babel-plugin-lodash": "^3.3.2",
   "cross-env": "^7.0.2",
   "dotenv": "^16.0.0",
@@ -68,5 +68,9 @@
 "author": {
   "name": "Vates SAS",
   "url": "https://vates.fr"
 },
+"exports": {
+  ".": "./dist/index.js",
+  "./path": "./dist/path.js"
+}
 }
@xen-orchestra/fs/src/_encryptor.js (new file, 71 lines)
@@ -0,0 +1,71 @@
+const { readChunk } = require('@vates/read-chunk')
+const crypto = require('crypto')
+const pumpify = require('pumpify')
+
+function getEncryptor(key) {
+  if (key === undefined) {
+    return {
+      id: 'NULL_ENCRYPTOR',
+      algorithm: 'none',
+      key: 'none',
+      ivLength: 0,
+      encryptData: buffer => buffer,
+      encryptStream: stream => stream,
+      decryptData: buffer => buffer,
+      decryptStream: stream => stream,
+    }
+  }
+  const algorithm = 'aes-256-cbc'
+  const ivLength = 16
+
+  function encryptStream(input) {
+    const iv = crypto.randomBytes(ivLength)
+    const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
+
+    const encrypted = pumpify(input, cipher)
+    encrypted.unshift(iv)
+    return encrypted
+  }
+
+  async function decryptStream(encryptedStream) {
+    const iv = await readChunk(encryptedStream, ivLength)
+    const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
+    /**
+     * WARNING
+     *
+     * the crytped size has an initializtion vector + a padding at the end
+     * whe can't predict the decrypted size from the start of the encrypted size
+     * thus, we can't set decrypted.length reliably
+     *
+     */
+    return pumpify(encryptedStream, cipher)
+  }
+
+  function encryptData(buffer) {
+    const iv = crypto.randomBytes(ivLength)
+    const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
+    const encrypted = cipher.update(buffer)
+    return Buffer.concat([iv, encrypted, cipher.final()])
+  }
+
+  function decryptData(buffer) {
+    const iv = buffer.slice(0, ivLength)
+    const encrypted = buffer.slice(ivLength)
+    const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
+    const decrypted = decipher.update(encrypted)
+    return Buffer.concat([decrypted, decipher.final()])
+  }
+
+  return {
+    id: algorithm,
+    algorithm,
+    key,
+    ivLength,
+    encryptData,
+    encryptStream,
+    decryptData,
+    decryptStream,
+  }
+}
+
+exports._getEncryptor = getEncryptor
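Aside: a self-contained round trip through the encryptor above; the random key is an assumption for the demo (aes-256-cbc requires 32 bytes).

```js
const crypto = require('crypto')
const { _getEncryptor } = require('./_encryptor')

const key = crypto.randomBytes(32) // 32-byte key for aes-256-cbc
const { encryptData, decryptData } = _getEncryptor(key)

const cipherText = encryptData(Buffer.from('hello'))
// cipherText = 16-byte IV + padded payload, so it is longer than the input
console.log(decryptData(cipherText).toString()) // 'hello'
```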
@@ -1,4 +1,5 @@
 import asyncMapSettled from '@xen-orchestra/async-map/legacy'
+import assert from 'assert'
 import getStream from 'get-stream'
 import { coalesceCalls } from '@vates/coalesce-calls'
 import { createLogger } from '@xen-orchestra/log'
@@ -6,13 +7,14 @@ import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
 import { limitConcurrency } from 'limit-concurrency-decorator'
 import { parse } from 'xo-remote-parser'
 import { pipeline } from 'stream'
-import { randomBytes } from 'crypto'
+import { randomBytes, randomUUID } from 'crypto'
 import { synchronized } from 'decorator-synchronized'

-import { basename, dirname, normalize as normalizePath } from './_path'
+import { basename, dirname, normalize as normalizePath } from './path'
 import { createChecksumStream, validChecksumOfReadStream } from './checksum'
+import { _getEncryptor } from './_encryptor'

-const { warn } = createLogger('@xen-orchestra:fs')
+const { info, warn } = createLogger('@xen-orchestra:fs')

 const checksumFile = file => file + '.checksum'
 const computeRate = (hrtime, size) => {
@@ -23,6 +25,9 @@ const computeRate = (hrtime, size) => {
 const DEFAULT_TIMEOUT = 6e5 // 10 min
 const DEFAULT_MAX_PARALLEL_OPERATIONS = 10

+const ENCRYPTION_DESC_FILENAME = 'encryption.json'
+const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
+
 const ignoreEnoent = error => {
   if (error == null || error.code !== 'ENOENT') {
     throw error
@@ -63,6 +68,7 @@ class PrefixWrapper {
 }

 export default class RemoteHandlerAbstract {
+  _encryptor
   constructor(remote, options = {}) {
     if (remote.url === 'test://') {
       this._remote = remote
@@ -73,6 +79,7 @@ export default class RemoteHandlerAbstract {
      }
    }
    ;({ highWaterMark: this._highWaterMark, timeout: this._timeout = DEFAULT_TIMEOUT } = options)
+    this._encryptor = _getEncryptor(this._remote.encryptionKey)

    const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
    this.closeFile = sharedLimit(this.closeFile)
@@ -111,90 +118,51 @@ export default class RemoteHandlerAbstract {
     await this.__closeFile(fd)
   }

-  // TODO: remove method
-  async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
+  async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
+    if (options.end !== undefined || options.start !== undefined) {
+      assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
+    }
     if (typeof file === 'string') {
       file = normalizePath(file)
     }
-    const path = typeof file === 'string' ? file : file.path
-    const streamP = timeout.call(
-      this._createOutputStream(file, {
-        dirMode,
-        flags: 'wx',
-        ...options,
-      }),
+
+    let stream = await timeout.call(
+      this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
       this._timeout
     )

-    if (!checksum) {
-      return streamP
-    }
+    // detect early errors
+    await fromEvent(stream, 'readable')

-    const checksumStream = createChecksumStream()
-    const forwardError = error => {
-      checksumStream.emit('error', error)
-    }
+    if (checksum) {
+      try {
+        const path = typeof file === 'string' ? file : file.path
+        const checksum = await this._readFile(checksumFile(path), { flags: 'r' })

-    const stream = await streamP
-    stream.on('error', forwardError)
-    checksumStream.pipe(stream)
-
-    checksumStream.checksumWritten = checksumStream.checksum
-      .then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
-      .catch(forwardError)
-
-    return checksumStream
-  }
-
-  createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
-    if (typeof file === 'string') {
-      file = normalizePath(file)
-    }
-    const path = typeof file === 'string' ? file : file.path
-    const streamP = timeout
-      .call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
-      .then(stream => {
-        // detect early errors
-        let promise = fromEvent(stream, 'readable')
-
-        // try to add the length prop if missing and not a range stream
-        if (stream.length === undefined && options.end === undefined && options.start === undefined) {
-          promise = Promise.all([
-            promise,
-            ignoreErrors.call(
-              this._getSize(file).then(size => {
-                stream.length = size
-              })
-            ),
-          ])
+        const { length } = stream
+        stream = validChecksumOfReadStream(stream, String(checksum).trim())
+        stream.length = length
+      } catch (error) {
+        if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
+          throw error
         }
-
-        return promise.then(() => stream)
-      })
-
-    if (!checksum) {
-      return streamP
-    }
-
-    // avoid a unhandled rejection warning
-    ignoreErrors.call(streamP)
-
-    return this._readFile(checksumFile(path), { flags: 'r' }).then(
-      checksum =>
-        streamP.then(stream => {
-          const { length } = stream
-          stream = validChecksumOfReadStream(stream, String(checksum).trim())
-          stream.length = length
-
-          return stream
-        }),
-      error => {
-        if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
-          return streamP
-        }
-        throw error
-      }
-    )
-  }
+      }
+    }

+    if (this.isEncrypted) {
+      stream = this._encryptor.decryptStream(stream)
+    } else {
+      // try to add the length prop if missing and not a range stream
+      if (stream.length === undefined && options.end === undefined && options.start === undefined) {
+        try {
+          stream.length = await this._getSize(file)
+        } catch (error) {
+          // ignore errors
+        }
+      }
+    }
+
+    return stream
+  }

   /**
@@ -210,6 +178,8 @@ export default class RemoteHandlerAbstract {
   async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
     path = normalizePath(path)
     let checksumStream
+
+    input = this._encryptor.encryptStream(input)
     if (checksum) {
       checksumStream = createChecksumStream()
       pipeline(input, checksumStream, noop)
@@ -220,6 +190,8 @@ export default class RemoteHandlerAbstract {
       validator,
     })
     if (checksum) {
+      // using _outpuFile means the checksum will NOT be encrypted
+      // it is by design to allow checking of encrypted files without the key
       await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
     }
   }
@@ -239,8 +211,13 @@ export default class RemoteHandlerAbstract {
     return timeout.call(this._getInfo(), this._timeout)
   }

+  // when using encryption, the file size is aligned with the encryption block size ( 16 bytes )
+  // that means that the size will be 1 to 16 bytes more than the content size + the initialized vector length (16 bytes)
   async getSize(file) {
-    return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
+    assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
+
+    const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
+    return size - this._encryptor.ivLength
   }

   async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
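Aside: making the size comment above concrete, assuming the PKCS#7 padding used by Node's aes-256-cbc (numbers are illustrative):

```js
const ivLength = 16
const content = 1000 // plaintext bytes (illustrative)
const padded = Math.ceil((content + 1) / 16) * 16 // 1008: padding always adds 1-16 bytes
const onDisk = ivLength + padded // 1024 bytes stored on the remote
// the exact plaintext size cannot be recovered from `onDisk`, hence the assertion
```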
@@ -286,15 +263,18 @@ export default class RemoteHandlerAbstract {
   }

   async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
-    await this._outputFile(normalizePath(file), data, { dirMode, flags })
+    const encryptedData = this._encryptor.encryptData(data)
+    await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
   }

   async read(file, buffer, position) {
+    assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
     return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
   }

   async readFile(file, { flags = 'r' } = {}) {
-    return this._readFile(normalizePath(file), { flags })
+    const data = await this._readFile(normalizePath(file), { flags })
+    return this._encryptor.decryptData(data)
   }

   async rename(oldPath, newPath, { checksum = false } = {}) {
@@ -334,6 +314,61 @@ export default class RemoteHandlerAbstract {
|
||||
@synchronized()
|
||||
async sync() {
|
||||
await this._sync()
|
||||
try {
|
||||
await this._checkMetadata()
|
||||
} catch (error) {
|
||||
await this._forget()
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async _canWriteMetadata() {
|
||||
const list = await this.list('/', {
|
||||
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
|
||||
})
|
||||
return list.length === 0
|
||||
}
|
||||
|
||||
async _createMetadata() {
|
||||
await Promise.all([
|
||||
this._writeFile(
|
||||
normalizePath(ENCRYPTION_DESC_FILENAME),
|
||||
JSON.stringify({ algorithm: this._encryptor.algorithm }),
|
||||
{
|
||||
flags: 'w',
|
||||
}
|
||||
), // not encrypted
|
||||
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
|
||||
])
|
||||
}
|
||||
|
||||
async _checkMetadata() {
|
||||
try {
|
||||
// this file is not encrypted
|
||||
const data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
|
||||
JSON.parse(data)
|
||||
} catch (error) {
|
||||
if (error.code !== 'ENOENT') {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
// this file is encrypted
|
||||
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME)
|
||||
JSON.parse(data)
|
||||
} catch (error) {
|
||||
if (error.code === 'ENOENT' || (await this._canWriteMetadata())) {
|
||||
info('will update metadata of this remote')
|
||||
return this._createMetadata()
|
||||
}
|
||||
warn(
|
||||
`The encryptionKey settings of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
|
||||
{ error }
|
||||
)
|
||||
// will probably send a ERR_OSSL_EVP_BAD_DECRYPT if key is incorrect
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
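The two metadata files form a cheap wrong-key detector: the unencrypted descriptor records the algorithm, while the encrypted file holds random JSON that only decrypts with the original key. A self-contained sketch of why decrypting with the wrong key fails loudly (this uses `node:crypto` and aes-256-cbc directly as an assumption, not the handler's actual encryptor):

```js
import { createCipheriv, createDecipheriv, randomBytes } from 'node:crypto'

const keyA = randomBytes(32) // key the remote was created with
const keyB = randomBytes(32) // key from the current configuration
const iv = randomBytes(16)

const cipher = createCipheriv('aes-256-cbc', keyA, iv)
const ciphertext = Buffer.concat([cipher.update('{"random":"0000"}', 'utf8'), cipher.final()])

const decipher = createDecipheriv('aes-256-cbc', keyB, iv)
try {
  Buffer.concat([decipher.update(ciphertext), decipher.final()])
} catch (error) {
  console.log(error.code) // typically ERR_OSSL_EVP_BAD_DECRYPT
}
```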
  async test() {
@@ -387,11 +422,13 @@
  }

  async write(file, buffer, position) {
    assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
    await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
  }

  async writeFile(file, data, { flags = 'wx' } = {}) {
    await this._writeFile(normalizePath(file), data, { flags })
    const encryptedData = this._encryptor.encryptData(data)
    await this._writeFile(normalizePath(file), encryptedData, { flags })
  }

  // Methods that can be called by private methods to avoid parallel limit on public methods
@@ -424,6 +461,10 @@

  // Methods that can be implemented by inheriting classes

  useVhdDirectory() {
    return this._remote.useVhdDirectory ?? false
  }

  async _closeFile(fd) {
    throw new Error('Not implemented')
  }
@@ -506,9 +547,13 @@

  async _outputStream(path, input, { dirMode, validator }) {
    const tmpPath = `${dirname(path)}/.${basename(path)}`
    const output = await this.createOutputStream(tmpPath, {
      dirMode,
    })
    const output = await timeout.call(
      this._createOutputStream(tmpPath, {
        dirMode,
        flags: 'wx',
      }),
      this._timeout
    )
    try {
      await fromCallback(pipeline, input, output)
      if (validator !== undefined) {
@@ -591,6 +636,10 @@
  async _writeFile(file, data, options) {
    throw new Error('Not implemented')
  }

  get isEncrypted() {
    return this._encryptor.id !== 'NULL_ENCRYPTOR'
  }
}

function createPrefixWrapperMethods() {
@@ -30,18 +30,6 @@ describe('closeFile()', () => {
  })
})

describe('createOutputStream()', () => {
  it(`throws in case of timeout`, async () => {
    const testHandler = new TestHandler({
      createOutputStream: () => new Promise(() => {}),
    })

    const promise = testHandler.createOutputStream('File')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
  })
})

describe('getInfo()', () => {
  it('throws in case of timeout', async () => {
    const testHandler = new TestHandler({
@@ -1,10 +1,7 @@
/* eslint-env jest */

import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'

import { getHandler } from '.'
@@ -27,9 +24,6 @@ const unsecureRandomBytes = n => {

const TEST_DATA_LEN = 1024
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
const createTestDataStream = asyncIteratorToStream(function* () {
  yield TEST_DATA
})

const rejectionOf = p =>
  p.then(
@@ -82,14 +76,6 @@ handlers.forEach(url => {
    })
  })

  describe('#createOutputStream()', () => {
    it('creates parent dir if missing', async () => {
      const stream = await handler.createOutputStream('dir/file')
      await fromCallback(pipeline, createTestDataStream(), stream)
      await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
    })
  })

  describe('#getInfo()', () => {
    let info
    beforeAll(async () => {
@@ -5,7 +5,6 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'

const HANDLERS = {
  file: RemoteHandlerLocal,
@@ -15,10 +14,8 @@ const HANDLERS = {

try {
  execa.sync('mount.cifs', ['-V'])
  HANDLERS.smb = RemoteHandlerSmbMount
} catch (_) {
  HANDLERS.smb = RemoteHandlerSmb
}
} catch (_) {}

export const getHandler = (remote, ...rest) => {
  const Handler = HANDLERS[parse(remote.url).type]
@@ -1,13 +1,38 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'

import RemoteHandlerAbstract from './abstract'

const { info, warn } = createLogger('xo:fs:local')

// save current stack trace and add it to any rejected error
//
// This is especially useful when the resolution is separate from the initial
// call, which is often the case with RPC libs.
//
// There is a perf impact and it should be avoided in production.
async function addSyncStackTrace(fn, ...args) {
  const stackContainer = new Error()
  try {
    return await fn.apply(this, args)
  } catch (error) {
    error.syncStack = stackContainer.stack
    throw error
  }
}

function dontAddSyncStackTrace(fn, ...args) {
  return fn.apply(this, args)
}
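A minimal usage sketch (not part of the diff): async `fs` rejections normally carry a stack rooted in library internals, while the wrapper records where the call was actually made.

```js
import fs from 'fs-extra'

async function demo() {
  try {
    // the stackContainer Error is created here, at the call site
    await addSyncStackTrace(fs.readFile, '/does/not/exist')
  } catch (error) {
    console.log(error.code) // ENOENT
    console.log(error.syncStack) // stack captured at the call site above
  }
}
demo()
```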
export default class LocalHandler extends RemoteHandlerAbstract {
  constructor(remote, opts = {}) {
    super(remote)

    this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
    this._retriesOnEagain = {
      delay: 1e3,
      retries: 9,
@@ -30,17 +55,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  async _closeFile(fd) {
    return fs.close(fd)
    return this._addSyncStackTrace(fs.close, fd)
  }

  async _copy(oldPath, newPath) {
    return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
    return this._addSyncStackTrace(fs.copy, this._getFilePath(oldPath), this._getFilePath(newPath))
  }

  async _createReadStream(file, options) {
    if (typeof file === 'string') {
      const stream = fs.createReadStream(this._getFilePath(file), options)
      await fromEvent(stream, 'open')
      await this._addSyncStackTrace(fromEvent, stream, 'open')
      return stream
    }
    return fs.createReadStream('', {
@@ -53,7 +78,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  async _createWriteStream(file, options) {
    if (typeof file === 'string') {
      const stream = fs.createWriteStream(this._getFilePath(file), options)
      await fromEvent(stream, 'open')
      await this._addSyncStackTrace(fromEvent, stream, 'open')
      return stream
    }
    return fs.createWriteStream('', {
@@ -79,71 +104,98 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  async _getSize(file) {
    const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
    const stats = await this._addSyncStackTrace(fs.stat, this._getFilePath(typeof file === 'string' ? file : file.path))
    return stats.size
  }

  async _list(dir) {
    return fs.readdir(this._getFilePath(dir))
    return this._addSyncStackTrace(fs.readdir, this._getFilePath(dir))
  }

  _lock(path) {
    return lockfile.lock(this._getFilePath(path))
  async _lock(path) {
    const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
      async onCompromised(error) {
        warn('lock compromised', { error })
        try {
          release = await acquire()
          info('compromised lock was reacquired')
        } catch (error) {
          warn('compromised lock could not be reacquired', { error })
        }
      },
    })

    let release = await this._addSyncStackTrace(acquire)

    return async () => {
      try {
        await this._addSyncStackTrace(release)
      } catch (error) {
        warn('lock could not be released', { error })
      }
    }
  }
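The rewritten `_lock` leans on `proper-lockfile`'s `onCompromised` hook: when the lock's heartbeat is lost (for example after an NFS hiccup), it tries to reacquire instead of crashing, and a failed release is logged rather than thrown. A hedged sketch of the same pattern in isolation:

```js
import lockfile from 'proper-lockfile'

// run fn while holding a lock on path; reacquire on compromise, and never
// let a failed release bubble up to the caller
async function withLock(path, fn) {
  let release = await lockfile.lock(path, {
    async onCompromised(error) {
      console.warn('lock compromised', error)
      try {
        release = await lockfile.lock(path)
      } catch (error) {
        console.warn('lock could not be reacquired', error)
      }
    },
  })
  try {
    return await fn()
  } finally {
    await release().catch(error => console.warn('lock could not be released', error))
  }
}
```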
  _mkdir(dir, { mode }) {
    return fs.mkdir(this._getFilePath(dir), { mode })
    return this._addSyncStackTrace(fs.mkdir, this._getFilePath(dir), { mode })
  }

  async _openFile(path, flags) {
    return fs.open(this._getFilePath(path), flags)
    return this._addSyncStackTrace(fs.open, this._getFilePath(path), flags)
  }

  async _read(file, buffer, position) {
    const needsClose = typeof file === 'string'
    file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
    file = needsClose ? await this._addSyncStackTrace(fs.open, this._getFilePath(file), 'r') : file.fd
    try {
      return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
      return await this._addSyncStackTrace(
        fs.read,
        file,
        buffer,
        0,
        buffer.length,
        position === undefined ? null : position
      )
    } finally {
      if (needsClose) {
        await fs.close(file)
        await this._addSyncStackTrace(fs.close, file)
      }
    }
  }

  async _readFile(file, options) {
    const filePath = this._getFilePath(file)
    return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
    return await this._addSyncStackTrace(retry, () => fs.readFile(filePath, options), this._retriesOnEagain)
  }

  async _rename(oldPath, newPath) {
    return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
    return this._addSyncStackTrace(fs.rename, this._getFilePath(oldPath), this._getFilePath(newPath))
  }

  async _rmdir(dir) {
    return fs.rmdir(this._getFilePath(dir))
    return this._addSyncStackTrace(fs.rmdir, this._getFilePath(dir))
  }

  async _sync() {
    const path = this._getRealPath('/')
    await fs.ensureDir(path)
    await fs.access(path, fs.R_OK | fs.W_OK)
    await this._addSyncStackTrace(fs.ensureDir, path)
    await this._addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
  }

  _truncate(file, len) {
    return fs.truncate(this._getFilePath(file), len)
    return this._addSyncStackTrace(fs.truncate, this._getFilePath(file), len)
  }

  async _unlink(file) {
    const filePath = this._getFilePath(file)
    return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
    return await this._addSyncStackTrace(retry, () => fs.unlink(filePath), this._retriesOnEagain)
  }

  _writeFd(file, buffer, position) {
    return fs.write(file.fd, buffer, 0, buffer.length, position)
    return this._addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, { flags }) {
    return fs.writeFile(this._getFilePath(file), data, { flag: flags })
    return this._addSyncStackTrace(fs.writeFile, this._getFilePath(file), data, { flag: flags })
  }
}
@@ -1,6 +1,6 @@
import path from 'path'

const { basename, dirname, join, resolve, sep } = path.posix
const { basename, dirname, join, resolve, relative, sep } = path.posix

export { basename, dirname, join }

@@ -19,3 +19,6 @@ export function split(path) {

  return parts
}

export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
@@ -27,7 +27,7 @@ import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './_path'
import { basename, join, split } from './path'
import { asyncEach } from '@vates/async-each'

// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -155,6 +155,14 @@ export default class S3Handler extends RemoteHandlerAbstract {
      if (e.name === 'EntityTooLarge') {
        return this._multipartCopy(oldPath, newPath)
      }
      // normalize this error code
      if (e.name === 'NoSuchKey') {
        const error = new Error(`ENOENT: no such file or directory '${oldPath}'`)
        error.cause = e
        error.code = 'ENOENT'
        error.path = oldPath
        throw error
      }
      throw e
    }
  }
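With S3's `NoSuchKey` remapped to `ENOENT`, callers can handle a missing object exactly like a missing file on any other remote. Hypothetical usage (the helper name is made up):

```js
// returns false instead of throwing when the source object does not exist
async function safeRename(handler, from, to) {
  try {
    await handler.rename(from, to)
    return true
  } catch (error) {
    if (error.code === 'ENOENT') {
      return false // same branch as for local/NFS/SMB remotes
    }
    throw error
  }
}
```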
@@ -525,4 +533,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
  }

  async _closeFile(fd) {}

  useVhdDirectory() {
    return true
  }
}
@@ -1,23 +0,0 @@
import { parse } from 'xo-remote-parser'

import MountHandler from './_mount'
import { normalize } from './_path'

export default class SmbMountHandler extends MountHandler {
  constructor(remote, opts) {
    const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
    super(remote, opts, {
      type: 'cifs',
      device: '//' + host + normalize(path),
      options: `domain=${domain}`,
      env: {
        USER: username,
        PASSWD: password,
      },
    })
  }

  get type() {
    return 'smb'
  }
}
@@ -1,163 +1,23 @@
import Smb2 from '@marsaud/smb2'
import { parse } from 'xo-remote-parser'

import RemoteHandlerAbstract from './abstract'
import MountHandler from './_mount'
import { normalize } from './path'

// Normalize the error code for file not found.
const wrapError = (error, code) => ({
  __proto__: error,
  cause: error,
  code,
})
const normalizeError = (error, shouldBeDirectory) => {
  const { code } = error

  throw code === 'STATUS_DIRECTORY_NOT_EMPTY'
    ? wrapError(error, 'ENOTEMPTY')
    : code === 'STATUS_FILE_IS_A_DIRECTORY'
    ? wrapError(error, 'EISDIR')
    : code === 'STATUS_NOT_A_DIRECTORY'
    ? wrapError(error, 'ENOTDIR')
    : code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
    ? wrapError(error, 'ENOENT')
    : code === 'STATUS_OBJECT_NAME_COLLISION'
    ? wrapError(error, 'EEXIST')
    : code === 'STATUS_NOT_SUPPORTED' || code === 'STATUS_INVALID_PARAMETER'
    ? wrapError(error, shouldBeDirectory ? 'ENOTDIR' : 'EISDIR')
    : error
}
const normalizeDirError = error => normalizeError(error, true)

export default class SmbHandler extends RemoteHandlerAbstract {
export default class SmbHandler extends MountHandler {
  constructor(remote, opts) {
    super(remote, opts)

    // defined in _sync()
    this._client = undefined

    const prefix = this._remote.path
    this._prefix = prefix !== '' ? prefix + '\\' : prefix
    const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
    super(remote, opts, {
      type: 'cifs',
      device: '//' + host + normalize(path),
      options: `domain=${domain}`,
      env: {
        USER: username,
        PASSWD: password,
      },
    })
  }

  get type() {
    return 'smb'
  }

  _getFilePath(file) {
    return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
  }

  _dirname(file) {
    const parts = file.split('\\')
    parts.pop()
    return parts.join('\\')
  }

  _closeFile(file) {
    return this._client.close(file).catch(normalizeError)
  }

  _createReadStream(file, options) {
    if (typeof file === 'string') {
      file = this._getFilePath(file)
    } else {
      options = { autoClose: false, ...options, fd: file.fd }
      file = ''
    }
    return this._client.createReadStream(file, options).catch(normalizeError)
  }

  _createWriteStream(file, options) {
    if (typeof file === 'string') {
      file = this._getFilePath(file)
    } else {
      options = { autoClose: false, ...options, fd: file.fd }
      file = ''
    }
    return this._client.createWriteStream(file, options).catch(normalizeError)
  }

  _forget() {
    const client = this._client
    this._client = undefined
    return client.disconnect()
  }

  _getSize(file) {
    return this._client.getSize(this._getFilePath(file)).catch(normalizeError)
  }

  _list(dir) {
    return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
  }

  _mkdir(dir, { mode }) {
    return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
  }

  // TODO: add flags
  _openFile(path, flags) {
    return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
  }

  async _read(file, buffer, position) {
    const client = this._client
    const needsClose = typeof file === 'string'
    file = needsClose ? await client.open(this._getFilePath(file)) : file.fd
    try {
      return await client.read(file, buffer, 0, buffer.length, position)
    } catch (error) {
      normalizeError(error)
    } finally {
      if (needsClose) {
        await client.close(file)
      }
    }
  }

  _readFile(file, options) {
    return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
  }

  _rename(oldPath, newPath) {
    return this._client
      .rename(this._getFilePath(oldPath), this._getFilePath(newPath), {
        replace: true,
      })
      .catch(normalizeError)
  }

  _rmdir(dir) {
    return this._client.rmdir(this._getFilePath(dir)).catch(normalizeDirError)
  }

  _sync() {
    const remote = this._remote

    this._client = new Smb2({
      share: `\\\\${remote.host}`,
      domain: remote.domain,
      username: remote.username,
      password: remote.password,
      autoCloseTimeout: 0,
    })

    // Check access (smb2 does not expose connect in public so far...)
    return this.list('.')
  }

  _truncate(file, len) {
    return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
  }

  _unlink(file) {
    return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
  }

  _writeFd(file, buffer, position) {
    return this._client.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, options) {
    return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
  }
}
@@ -2,12 +2,7 @@

const camelCase = require('lodash/camelCase')

const {
  defineProperties,
  defineProperty,
  hasOwn = Function.prototype.call.bind(Object.prototype.hasOwnProperty),
  keys,
} = Object
const { defineProperties, defineProperty, keys } = Object
const noop = Function.prototype

const MIXIN_CYCLIC_DESCRIPTOR = {
@@ -18,49 +13,23 @@
}

module.exports = function mixin(object, mixins, args) {
  const importing = { __proto__: null }
  const importers = { __proto__: null }

  function instantiateMixin(name, Mixin) {
    defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
    const instance = new Mixin(object, ...args)
    defineProperty(object, name, {
      value: instance,
    })
    return instance
  }

  // add a lazy property for each mixin, this allows mixins to depend on
  // one another without any special ordering
  const descriptors = {
    loadMixin(name) {
      if (hasOwn(this, name)) {
        return Promise.resolve(this[name])
      }

      let promise = importing[name]
      if (promise === undefined) {
        const clean = () => {
          delete importing[name]
        }
        promise = importers[name]().then(Mixin => instantiateMixin(name, Mixin))
        promise.then(clean, clean)
        importing[name] = promise
      }
      return promise
    },
  }
  const descriptors = {}
  keys(mixins).forEach(name => {
    const Mixin = mixins[name]
    name = camelCase(name)

    if (Mixin.prototype === undefined) {
      importers[name] = Mixin(name)
    } else {
      descriptors[name] = {
        configurable: true,
        get: () => instantiateMixin(name, Mixin),
      }
    descriptors[name] = {
      configurable: true,
      get: () => {
        defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
        const instance = new Mixin(object, ...args)
        defineProperty(object, name, {
          value: instance,
        })
        return instance
      },
    }
  })
  defineProperties(object, descriptors)
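A hypothetical illustration of the lazy descriptors built above: mixins can reference one another inside their constructors without worrying about registration order, since each property instantiates its class on first access (the mixin classes here are made up).

```js
const mixin = require('@xen-orchestra/mixin')

class Database {
  constructor(app) {}
}
class Api {
  constructor(app) {
    // lazily instantiates Database on first access, regardless of order
    this.db = app.database
  }
}

const app = {}
mixin(app, { Database, Api }, [])
console.log(app.api instanceof Api) // true, and Database was pulled in on demand
```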
@@ -16,7 +16,7 @@
  },
  "preferGlobal": false,
  "engines": {
    "node": ">=7.6"
    "node": ">=6"
  },
  "dependencies": {
    "bind-property-descriptor": "^2.0.0",
@@ -30,20 +30,41 @@ export default class Hooks extends EventEmitter {
  // Run *start* async listeners.
  //
  // They initialize the application.
  //
  // *startCore* is automatically called if necessary.
  async start() {
    assert.strictEqual(this._status, 'stopped')
    if (this._status === 'stopped') {
      await this.startCore()
    } else {
      assert.strictEqual(this._status, 'core started')
    }
    this._status = 'starting'
    await runHook(this, 'start')
    this.emit((this._status = 'started'))
  }

  // Run *stop* async listeners.
  // Run *start core* async listeners.
  //
  // They initialize core features of the application (connect to databases,
  // etc.) and should be fast and side-effect free.
  async startCore() {
    assert.strictEqual(this._status, 'stopped')
    this._status = 'starting core'
    await runHook(this, 'start core')
    this.emit((this._status = 'core started'))
  }

  // Run *stop* async listeners if necessary and *stop core* listeners.
  //
  // They close connections, unmount file systems, save states, etc.
  async stop() {
    assert.strictEqual(this._status, 'started')
    this._status = 'stopping'
    await runHook(this, 'stop')
    if (this._status !== 'core started') {
      assert.strictEqual(this._status, 'started')
      this._status = 'stopping'
      await runHook(this, 'stop')
      this._status = 'core started'
    }
    await runHook(this, 'stop core')
    this.emit((this._status = 'stopped'))
  }
}
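A hypothetical walk through the new two-phase lifecycle, assuming hook listeners are registered as regular EventEmitter listeners (which is how `runHook` consumes them): `startCore()` can run alone, for example for a maintenance command that only needs the database, and `stop()` unwinds whichever phases actually ran.

```js
const app = new Hooks()
app.on('start core', () => console.log('core services up'))
app.on('start', () => console.log('fully started'))
app.on('stop', () => console.log('stopping'))
app.on('stop core', () => console.log('core services down'))

async function demo() {
  await app.startCore() // status: core started
  await app.start() // startCore is skipped this time, status: started
  await app.stop() // runs stop, then stop core, status: stopped
}
demo()
```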
@@ -40,7 +40,7 @@ export default class HttpProxy {
    this.#app = app

    const events = new EventListenersManager(httpServer)
    app.config.watch('http.proxy.enabled', (enabled = false) => {
    app.config.watch('http.proxy.enabled', (enabled = true) => {
      events.removeAll()
      if (enabled) {
        events.add('connect', this.#handleConnect.bind(this)).add('request', this.#handleRequest.bind(this))
214  @xen-orchestra/mixins/SslCertificate.mjs  Normal file
@@ -0,0 +1,214 @@
import { createLogger } from '@xen-orchestra/log'
import { createSecureContext } from 'tls'
import { dirname } from 'node:path'
import { X509Certificate } from 'node:crypto'
import acme from 'acme-client'
import fs from 'node:fs/promises'
import get from 'lodash/get.js'

const { debug, info, warn } = createLogger('xo:mixins:sslCertificate')

acme.setLogger(message => {
  debug(message)
})

// - create any missing parent directories
// - replace existing files
// - secure permissions (read-only for the owner)
async function outputFile(path, content) {
  await fs.mkdir(dirname(path), { recursive: true })
  try {
    await fs.unlink(path)
  } catch (error) {
    if (error.code !== 'ENOENT') {
      throw error
    }
  }
  await fs.writeFile(path, content, { flag: 'wx', mode: 0o400 })
}

// from https://github.com/publishlab/node-acme-client/blob/master/examples/auto.js
class SslCertificate {
  #cert
  #challengeCreateFn
  #challengeRemoveFn
  #delayBeforeRenewal = 30 * 24 * 60 * 60 * 1000 // 30 days
  #secureContext
  #updateSslCertificatePromise

  constructor({ challengeCreateFn, challengeRemoveFn }, cert, key) {
    this.#challengeCreateFn = challengeCreateFn
    this.#challengeRemoveFn = challengeRemoveFn

    this.#set(cert, key)
  }

  get #isValid() {
    const cert = this.#cert
    return cert !== undefined && Date.parse(cert.validTo) > Date.now() && cert.issuer !== cert.subject
  }

  get #shouldBeRenewed() {
    return !(this.#isValid && Date.parse(this.#cert.validTo) > Date.now() + this.#delayBeforeRenewal)
  }

  #set(cert, key) {
    this.#cert = new X509Certificate(cert)
    this.#secureContext = createSecureContext({ cert, key })
  }

  async getSecureContext(config) {
    if (!this.#shouldBeRenewed) {
      return this.#secureContext
    }

    if (this.#updateSslCertificatePromise === undefined) {
      // not currently updating certificate
      //
      // ensure we only refresh certificate once at a time
      //
      // promise is cleaned by #updateSslCertificate itself
      this.#updateSslCertificatePromise = this.#updateSslCertificate(config)
    }

    // old certificate is still here, return it while updating
    if (this.#isValid) {
      return this.#secureContext
    }

    return this.#updateSslCertificatePromise
  }

  async #save(certPath, cert, keyPath, key) {
    try {
      await Promise.all([outputFile(keyPath, key), outputFile(certPath, cert)])
      info('new certificate generated', { cert: certPath, key: keyPath })
    } catch (error) {
      warn(`couldn't write let's encrypt certificates to disk `, { error })
    }
  }

  async #updateSslCertificate(config) {
    const { cert: certPath, key: keyPath, acmeEmail, acmeDomain } = config
    try {
      let { acmeCa = 'letsencrypt/production' } = config
      if (!(acmeCa.startsWith('http:') || acmeCa.startsWith('https:'))) {
        acmeCa = get(acme.directory, acmeCa.split('/'))
      }

      /* Init client */
      const client = new acme.Client({
        directoryUrl: acmeCa,
        accountKey: await acme.crypto.createPrivateKey(),
      })

      /* Create CSR */
      let [key, csr] = await acme.crypto.createCsr({
        commonName: acmeDomain,
      })
      csr = csr.toString()
      key = key.toString()
      debug('Successfully generated key and csr')

      /* Certificate */
      const cert = await client.auto({
        challengeCreateFn: this.#challengeCreateFn,
        challengePriority: ['http-01'],
        challengeRemoveFn: this.#challengeRemoveFn,
        csr,
        email: acmeEmail,
        skipChallengeVerification: true,
        termsOfServiceAgreed: true,
      })
      debug('Successfully generated certificate')

      this.#set(cert, key)

      // don't wait for this
      this.#save(certPath, cert, keyPath, key)

      return this.#secureContext
    } catch (error) {
      warn(`couldn't renew ssl certificate`, { acmeDomain, error })
    } finally {
      this.#updateSslCertificatePromise = undefined
    }
  }
}

export default class SslCertificates {
  #app
  #challenges = new Map()
  #challengeHandlers = {
    challengeCreateFn: (authz, challenge, keyAuthorization) => {
      this.#challenges.set(challenge.token, keyAuthorization)
    },
    challengeRemoveFn: (authz, challenge, keyAuthorization) => {
      this.#challenges.delete(challenge.token)
    },
  }
  #handlers = new Map()

  constructor(app, { httpServer }) {
    // don't set up the certificate handling if httpServer is not present
    //
    // that can happen when the app is instantiated in another context like xo-server-recover-account
    if (httpServer === undefined) {
      return
    }
    const prefix = '/.well-known/acme-challenge/'
    httpServer.on('request', (req, res) => {
      const { url } = req
      if (url.startsWith(prefix)) {
        const token = url.slice(prefix.length)
        this.#acmeChallendMiddleware(req, res, token)
      }
    })

    this.#app = app

    httpServer.getSecureContext = this.getSecureContext.bind(this)
  }

  async getSecureContext(httpsDomainName, configKey, initialCert, initialKey) {
    const config = this.#app.config.get(['http', 'listen', configKey])
    const handlers = this.#handlers

    const { acmeDomain } = config

    // not a Let's Encrypt-protected endpoint, something changed in the configuration
    if (acmeDomain === undefined) {
      handlers.delete(configKey)
      return
    }

    // the server has been accessed with another domain, don't use the certificate
    if (acmeDomain !== httpsDomainName) {
      return
    }

    let handler = handlers.get(configKey)
    if (handler === undefined) {
      // register the handler for this domain
      handler = new SslCertificate(this.#challengeHandlers, initialCert, initialKey)
      handlers.set(configKey, handler)
    }
    return handler.getSecureContext(config)
  }

  // middleware that will serve the http challenge to let's encrypt servers
  #acmeChallendMiddleware(req, res, token) {
    debug('fetching challenge for token ', token)
    const challenge = this.#challenges.get(token)
    debug('challenge content is ', challenge)
    if (challenge === undefined) {
      res.statusCode = 404
      res.end()
      return
    }

    res.write(challenge)
    res.end()
    debug('successfully answered challenge ')
  }
}
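`getSecureContext` funnels every renewal through `#updateSslCertificatePromise`, a single-flight guard: concurrent TLS handshakes share one in-flight ACME order instead of each starting their own. A stripped-down sketch of the pattern (the names are illustrative, not taken from the module):

```js
class SingleFlight {
  #promise

  // run fn, or join the run already in progress
  run(fn) {
    if (this.#promise === undefined) {
      this.#promise = Promise.resolve()
        .then(fn)
        .finally(() => {
          this.#promise = undefined // next call starts a fresh run
        })
    }
    return this.#promise
  }
}

const renew = new SingleFlight()
const slowTask = () => new Promise(resolve => setTimeout(resolve, 1000))
console.log(renew.run(slowTask) === renew.run(slowTask)) // true: one shared promise
```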
@@ -10,11 +10,11 @@

## Set up

The proxy is disabled by default; to enable it, add the following lines to your config:
The proxy is enabled by default; to disable it, add the following lines to your config:

```toml
[http.proxy]
enabled = true
enabled = false
```

## Usage
49  @xen-orchestra/mixins/docs/SslCertificate.md  Normal file
@@ -0,0 +1,49 @@
> This module provides [Let's Encrypt](https://letsencrypt.org/) integration to `xo-proxy` and `xo-server`.

First of all, make sure your server is listening on port 80 for HTTP and port 443 for HTTPS.

In `xo-server`, to avoid HTTP access, enable the redirection to HTTPS:

```toml
[http]
redirectToHttps = true
```

Your server must be reachable at the configured domain by the certificate provider (e.g. Let's Encrypt), which usually means publicly reachable.

Finally, add the following entries to your HTTPS configuration.

```toml
# Must be set to true for this feature
autoCert = true

# These entries are required and indicate where the certificate and the
# private key will be saved.
cert = 'path/to/cert.pem'
key = 'path/to/key.pem'

# ACME (e.g. Let's Encrypt, ZeroSSL) CA directory
#
# Specifies the URL to the ACME CA's directory.
#
# An identifier `provider/directory` can be passed instead of a URL, see the
# list of supported directories here: https://www.npmjs.com/package/acme-client#directory-urls
#
# Note that the application cannot detect that this value has changed.
#
# In that case, delete the certificate and key files, and restart the
# application to generate new ones.
#
# Default is 'letsencrypt/production'
acmeCa = 'zerossl/production'

# Domain for which the certificate should be created.
#
# This entry is required.
acmeDomain = 'my.domain.net'

# Optional email address which will be used for the certificate creation.
#
# It will be notified of any issues.
acmeEmail = 'admin@my.domain.net'
```
@@ -14,15 +14,16 @@
    "url": "https://vates.fr"
  },
  "license": "AGPL-3.0-or-later",
  "version": "0.5.0",
  "version": "0.7.1",
  "engines": {
    "node": ">=12"
    "node": ">=15.6"
  },
  "dependencies": {
    "@vates/event-listeners-manager": "^1.0.0",
    "@vates/event-listeners-manager": "^1.0.1",
    "@vates/parse-duration": "^0.1.1",
    "@xen-orchestra/emit-async": "^1.0.0",
    "@xen-orchestra/log": "^0.3.0",
    "acme-client": "^5.0.0",
    "app-conf": "^2.1.0",
    "lodash": "^4.17.21",
    "promise-toolbox": "^0.21.0"
@@ -9,7 +9,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.1",
  "version": "0.1.2",
  "engines": {
    "node": ">=8.10"
  },
@@ -30,7 +30,7 @@
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@vates/read-chunk": "^0.1.2"
    "@vates/read-chunk": "^1.0.0"
  },
  "author": {
    "name": "Vates SAS",
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/proxy-cli",
  "version": "0.3.0",
  "version": "0.3.1",
  "license": "AGPL-3.0-or-later",
  "description": "CLI for @xen-orchestra/proxy",
  "keywords": [
@@ -26,7 +26,7 @@
  },
  "dependencies": {
    "@iarna/toml": "^2.2.0",
    "@vates/read-chunk": "^0.1.2",
    "@vates/read-chunk": "^1.0.0",
    "ansi-colors": "^4.1.1",
    "app-conf": "^2.1.0",
    "content-type": "^1.0.4",
@@ -1,6 +1,7 @@
import Config from '@xen-orchestra/mixins/Config.mjs'
import Hooks from '@xen-orchestra/mixins/Hooks.mjs'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.mjs'
import SslCertificate from '@xen-orchestra/mixins/SslCertificate.mjs'
import mixin from '@xen-orchestra/mixin'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'

@@ -14,9 +15,23 @@ import ReverseProxy from './mixins/reverseProxy.mjs'

export default class App {
  constructor(opts) {
    mixin(this, { Api, Appliance, Authentication, Backups, Config, Hooks, HttpProxy, Logs, Remotes, ReverseProxy }, [
      opts,
    ])
    mixin(
      this,
      {
        Api,
        Appliance,
        Authentication,
        Backups,
        Config,
        Hooks,
        HttpProxy,
        Logs,
        Remotes,
        ReverseProxy,
        SslCertificate,
      },
      [opts]
    )

    const debounceResource = createDebounceResource()
    this.config.watchDuration('resourceCacheDelay', delay => {
@@ -1,4 +1,4 @@
import { format, parse, MethodNotFound } from 'json-rpc-protocol'
import { format, parse, MethodNotFound, JsonRpcError } from 'json-rpc-protocol'
import * as errors from 'xo-common/api-errors.js'
import Ajv from 'ajv'
import asyncIteratorToStream from 'async-iterator-to-stream'
@@ -9,6 +9,7 @@ import helmet from 'koa-helmet'
import Koa from 'koa'
import once from 'lodash/once.js'
import Router from '@koa/router'
import stubTrue from 'lodash/stubTrue.js'
import Zone from 'node-zone'
import { createLogger } from '@xen-orchestra/log'

@@ -77,7 +78,19 @@ export default class Api {
      const { method, params } = body
      warn('call error', { method, params, error })
      ctx.set('Content-Type', 'application/json')
      ctx.body = format.error(body.id, error)

      let e = error
      if (error != null && typeof error.toJsonRpcError !== 'function') {
        const { message, ...data } = error

        // force these entries even if they are not enumerable
        data.code = error.code
        data.stack = error.stack

        e = new JsonRpcError(error.message, undefined, data)
      }

      ctx.body = format.error(body.id, e)
      return
    }
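A minimal illustration of why the wrapping above is needed: `message` and `stack` are own non-enumerable properties of `Error` instances, so a naive JSON serialization, or an object-rest copy, silently drops them.

```js
const error = new Error('oops')
console.log(JSON.stringify(error)) // {} : message and stack are not enumerable
const { message, ...data } = error
console.log(message, data) // 'oops' {} : rest only copies enumerable own props
```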
@@ -166,14 +179,20 @@ export default class Api {
          throw errors.noSuchObject('method', name)
        }

        const { description, params = {} } = method
        return { description, name, params }
        const { description, params = {}, result = {} } = method
        return { description, name, params, result }
      },
      {
        description: 'returns the signature of an API method',
        params: {
          method: { type: 'string' },
        },
        result: {
          description: { type: 'string' },
          name: { type: 'string' },
          params: { type: 'object' },
          result: { type: 'object' },
        },
      },
    ],
  },
@@ -205,40 +224,29 @@
    })
  }

  addMethod(name, method, { description, params = {} } = {}) {
  addMethod(name, method, { description, params = {}, result: resultSchema } = {}) {
    const methods = this._methods

    if (name in methods) {
      throw new Error(`API method ${name} already exists`)
    }

    const ajv = this._ajv
    const validate = ajv.compile({
      // we want additional properties to be disabled by default
      additionalProperties: params['*'] || false,
    const validateParams = this.#compileSchema(params)
    const validateResult = this.#compileSchema(resultSchema)

      properties: params,

      // we want params to be required by default unless explicitly marked so
      // we use property `optional` instead of object `required`
      required: Object.keys(params).filter(name => {
        const param = params[name]
        const required = !param.optional
        delete param.optional
        return required
      }),

      type: 'object',
    })

    const m = params => {
      if (!validate(params)) {
        throw errors.invalidParameters(validate.errors)
    const m = async params => {
      if (!validateParams(params)) {
        throw errors.invalidParameters(validateParams.errors)
      }
      return method(params)
      const result = await method(params)
      if (!validateResult(result)) {
        warn('invalid API method result', { errors: validateResult.errors, result })
      }
      return result
    }
    m.description = description
    m.params = params
    m.result = resultSchema

    methods[name] = m

@@ -289,4 +297,43 @@ export default class Api {
    }
    return fn(params)
  }

  #compileSchema(schema) {
    if (schema === undefined) {
      return stubTrue
    }

    if (schema.type === undefined) {
      schema = { type: 'object', properties: schema }
    }

    const { type } = schema
    if (Array.isArray(type) ? type.includes('object') : type === 'object') {
      const { properties = {} } = schema

      if (schema.additionalProperties === undefined) {
        const wildCard = properties['*']
        if (wildCard === undefined) {
          // we want additional properties to be disabled by default
          schema.additionalProperties = false
        } else {
          delete properties['*']
          schema.additionalProperties = wildCard
        }
      }

      // we want properties to be required by default unless explicitly marked so
      // we use property `optional` instead of object `required`
      if (schema.required === undefined) {
        schema.required = Object.keys(properties).filter(name => {
          const param = properties[name]
          const required = !param.optional
          delete param.optional
          return required
        })
      }
    }

    return this._ajv.compile(schema)
  }
}
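Under these conventions a registration might look like the following; the method name, handler, and schemas are made up for illustration, only the `optional` and `'*'` wildcard semantics come from `#compileSchema` above.

```js
// `api` is assumed to be an instance of the Api class above
const vmSet = async ({ id, name_label }) => ({ success: true }) // hypothetical handler

api.addMethod('vm.set', vmSet, {
  description: 'sets properties on a VM',
  params: {
    id: { type: 'string' }, // required by default
    name_label: { type: 'string', optional: true }, // explicitly optional
    '*': { type: 'string' }, // extra properties allowed, but must be strings
  },
  result: {
    success: { type: 'boolean' }, // invalid results are logged, not rejected
  },
})
```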
@@ -56,11 +56,32 @@ ${APP_NAME} v${APP_VERSION}
    createSecureServer: opts => createSecureServer({ ...opts, allowHTTP1: true }),
  })

  forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }) => {
  forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }, configKey) => {
    const useAcme = autoCert && opts.acmeDomain !== undefined

    // don't pass these entries to httpServer.listen(opts)
    for (const key of Object.keys(opts).filter(_ => _.startsWith('acme'))) {
      delete opts[key]
    }

    try {
      const niceAddress = await pRetry(
        async () => {
          if (cert !== undefined && key !== undefined) {
      let niceAddress
      if (cert !== undefined && key !== undefined) {
        if (useAcme) {
          opts.SNICallback = async (serverName, callback) => {
            try {
              // injected by mixins/SslCertificate
              const secureContext = await httpServer.getSecureContext(serverName, configKey, opts.cert, opts.key)
              callback(null, secureContext)
            } catch (error) {
              warn(error)
              callback(error, null)
            }
          }
        }

        niceAddress = await pRetry(
          async () => {
            try {
              opts.cert = fse.readFileSync(cert)
              opts.key = fse.readFileSync(key)
@@ -76,20 +97,22 @@ ${APP_NAME} v${APP_VERSION}
                opts.cert = pems.cert
                opts.key = pems.key
              }
            }

            return httpServer.listen(opts)
          },
          {
            tries: 2,
            when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
            onRetry: () => {
              warn('deleting invalid certificate')
              fse.unlinkSync(cert)
              fse.unlinkSync(key)
              return httpServer.listen(opts)
            },
          }
        )
          {
            tries: 2,
            when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
            onRetry: () => {
              warn('deleting invalid certificate')
              fse.unlinkSync(cert)
              fse.unlinkSync(key)
            },
          }
        )
      } else {
        niceAddress = await httpServer.listen(opts)
      }

      info(`Web server listening on ${niceAddress}`)
    } catch (error) {
@@ -146,6 +169,7 @@ ${APP_NAME} v${APP_VERSION}
    process.on(signal, () => {
      if (alreadyCalled) {
        warn('forced exit')
        // eslint-disable-next-line n/no-process-exit
        process.exit(1)
      }
      alreadyCalled = true
@@ -164,6 +188,7 @@ main(process.argv.slice(2)).then(
  error => {
    fatal(error)

    // eslint-disable-next-line n/no-process-exit
    process.exit(1)
  }
)
@@ -1,7 +1,7 @@
{
  "private": true,
  "name": "@xen-orchestra/proxy",
  "version": "0.23.2",
  "version": "0.26.0",
  "license": "AGPL-3.0-or-later",
  "description": "XO Proxy used to remotely execute backup jobs",
  "keywords": [
@@ -26,19 +26,19 @@
  },
  "dependencies": {
    "@iarna/toml": "^2.2.0",
    "@koa/router": "^10.0.0",
    "@koa/router": "^12.0.0",
    "@vates/cached-dns.lookup": "^1.0.0",
    "@vates/compose": "^2.1.0",
    "@vates/decorate-with": "^2.0.0",
    "@vates/disposable": "^0.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.25.0",
    "@xen-orchestra/fs": "^1.0.3",
    "@xen-orchestra/backups": "^0.27.4",
    "@xen-orchestra/fs": "^3.0.0",
    "@xen-orchestra/log": "^0.3.0",
    "@xen-orchestra/mixin": "^0.1.0",
    "@xen-orchestra/mixins": "^0.5.0",
    "@xen-orchestra/mixins": "^0.7.1",
    "@xen-orchestra/self-signed": "^0.1.3",
    "@xen-orchestra/xapi": "^1.2.0",
    "@xen-orchestra/xapi": "^1.4.2",
    "ajv": "^8.0.3",
    "app-conf": "^2.1.0",
    "async-iterator-to-stream": "^1.1.0",
@@ -60,7 +60,7 @@
    "source-map-support": "^0.5.16",
    "stoppable": "^1.0.6",
    "xdg-basedir": "^5.1.0",
    "xen-api": "^1.2.1",
    "xen-api": "^1.2.2",
    "xo-common": "^0.8.0"
  },
  "devDependencies": {
@@ -1,3 +0,0 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js
@@ -1,8 +1,10 @@
import escapeRegExp from 'lodash/escapeRegExp'
'use strict'

const escapeRegExp = require('lodash/escapeRegExp')

const compareLengthDesc = (a, b) => b.length - a.length

export function compileTemplate(pattern, rules) {
exports.compileTemplate = function compileTemplate(pattern, rules) {
  const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
  const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
  return (...params) =>
@@ -1,5 +1,8 @@
/* eslint-env jest */
import { compileTemplate } from '.'

'use strict'

const { compileTemplate } = require('.')

it("correctly replaces the template's variables", () => {
  const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {
@@ -14,31 +14,13 @@
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "main": "dist/",
  "browserslist": [
    ">2%"
  ],
  "engines": {
    "node": ">=6"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish --access public"
  },
  "dependencies": {
    "lodash": "^4.17.15"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
@@ -43,7 +43,7 @@
    "pw": "^0.0.4",
    "xdg-basedir": "^4.0.0",
    "xo-lib": "^0.11.1",
    "xo-vmdk-to-vhd": "^2.4.1"
    "xo-vmdk-to-vhd": "^2.4.3"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
1  @xen-orchestra/xapi-typegen/.npmignore  Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
90  @xen-orchestra/xapi-typegen/_genTs.mjs  Normal file
@@ -0,0 +1,90 @@
let indentLevel = 0

function indent() {
  return ' '.repeat(indentLevel)
}

function quoteId(name) {
  return /^[a-z0-9_]+$/i.test(name) ? name : JSON.stringify(name)
}

function genType(type, schema) {
  if (type === 'array' && schema.items !== undefined) {
    const { items } = schema
    if (Array.isArray(items)) {
      if (items.length !== 0) {
        return ['[' + items.map(genTs).join(', ') + ']']
      }
    } else {
      const { type } = items
      if (type !== undefined && type.length !== 0) {
        return genTs(items, true) + '[]'
      } else {
        return 'unknown[]'
      }
    }
  }

  if (type !== 'object') {
    return type
  }

  const code = []

  const { title } = schema
  const isInterface = title !== undefined
  if (isInterface) {
    code.push('interface ', title, ' ')
  }
  const fieldDelimiter = (isInterface ? ';' : ',') + '\n'

  const { additionalProperties, properties } = schema
  const hasAdditionalProperties = additionalProperties?.type !== undefined
  const propertiesKeys = Object.keys(properties ?? {})

  if (!hasAdditionalProperties && propertiesKeys.length === 0) {
    code.push('{}')
    return code.join('')
  }

  code.push('{\n')
  ++indentLevel

  for (const name of propertiesKeys.sort()) {
    const schema = properties[name]
    code.push(indent(), quoteId(name))
    if (schema.optional) {
      code.push('?')
    }
    code.push(': ')

    code.push(genTs(schema))

    code.push(fieldDelimiter)
  }

  if (hasAdditionalProperties) {
    code.push(indent(), '[key: string]: ', genTs(additionalProperties), fieldDelimiter)
  }

  --indentLevel
  code.push(indent(), '}')

  return code.join('')
}

export function genTs(schema, groupMultiple = false) {
  let { type } = schema
  if (Array.isArray(type)) {
    if (type.length !== 1) {
      const code = type
        .sort()
        .map(type => genType(type, schema))
        .join(' | ')

      return groupMultiple ? '(' + code + ')' : code
    }
    type = type[0]
  }
  return genType(type, schema)
}
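A quick check of what the generator emits for a small schema; the output in the comment was derived by hand from the code above, and the schema itself is made up:

```js
console.log(
  genTs({
    title: 'Vm',
    type: 'object',
    properties: {
      name_label: { type: 'string' },
      power_state: { type: ['string', 'null'], optional: true },
      tags: { type: 'array', items: { type: 'string' } },
    },
  })
)
// interface Vm {
//   name_label: string;
//   power_state?: null | string;
//   tags: string[];
// }
```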
110  @xen-orchestra/xapi-typegen/_updateSchema.mjs  Normal file
@@ -0,0 +1,110 @@
const JSON_TYPES = {
  __proto__: null,

  array: true,
  boolean: true,
  null: true,
  number: true,
  object: true,
  string: true,
}

function addType(schema, type) {
  const previous = schema.type
  if (previous === undefined) {
    schema.type = type
  } else if (Array.isArray(previous)) {
    if (previous.indexOf(type) === -1) {
      previous.push(type)
    }
  } else if (previous !== type) {
    schema.type = [previous, type]
  }
}

function getType(value) {
  let type = typeof value
  if (type === 'object') {
    if (value === null) {
      type = 'null'
    } else if (Array.isArray(value)) {
      type = 'array'
    }
  }

  if (type in JSON_TYPES) {
    return type
  }
  throw new TypeError('unsupported type: ' + type)
}

// like Math.max but v1 can be undefined
const max = (v1, v2) => (v1 > v2 ? v1 : v2)

// like Math.min but v1 can be undefined
const min = (v1, v2) => (v1 < v2 ? v1 : v2)

function updateSchema_(path, value, schema = { __proto__: null }, getOption) {
  if (value === undefined) {
    schema.optional = true
  } else {
    const type = getType(value)
    addType(schema, type)

    if (type === 'array') {
      const items = schema.items ?? (schema.items = { __proto__: null })
      const pathLength = path.length
      if (Array.isArray(items)) {
        for (let i = 0, n = value.length; i < n; ++i) {
          path[pathLength] = i
          items[i] = updateSchema_(path, value[i], items[i], getOption)
        }
      } else {
        for (let i = 0, n = value.length; i < n; ++i) {
          path[pathLength] = i
          updateSchema_(path, value[i], items, getOption)
        }
      }
      path.length = pathLength
    } else if (type === 'number') {
      if (getOption('computeMinimum', path)) {
        schema.minimum = min(schema.minimum, value)
      }
      if (getOption('computeMaximum', path)) {
        schema.maximum = max(schema.maximum, value)
      }
    } else if (type === 'object') {
      const pathLength = path.length
      const { additionalProperties } = schema
      if (typeof additionalProperties === 'object') {
        for (const key of Object.keys(value)) {
          path[pathLength] = key
          updateSchema_(path, value[key], additionalProperties, getOption)
        }
      } else {
        const properties = schema.properties ?? (schema.properties = { __proto__: null })

        // handle missing properties
        for (const key of Object.keys(properties)) {
          if (!Object.hasOwn(value, key)) {
            properties[key].optional = true
          }
        }

        // handle existing properties
        for (const key of Object.keys(value)) {
          path[pathLength] = key
          properties[key] = updateSchema_(path, value[key], properties[key], getOption)
        }
      }
      path.length = pathLength
    }
  }

  return schema
}

export function updateSchema(value, schema, options) {
  const getOption = options == null ? Function.prototype : typeof options === 'object' ? opt => options[opt] : options
  return updateSchema_([], value, schema, getOption)
}
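Feeding a couple of sample objects through `updateSchema` shows how types accumulate and how a property missing from a later sample becomes optional. A sketch with made-up data; the real CLI feeds whole XAPI object collections:

```js
let schema
schema = updateSchema({ name: 'a', tags: ['x'] }, schema)
schema = updateSchema({ name: 'b' }, schema)
console.log(schema)
// roughly:
// {
//   type: 'object',
//   properties: {
//     name: { type: 'string' },
//     tags: { type: 'array', items: { type: 'string' }, optional: true },
//   },
// }
```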
65
@xen-orchestra/xapi-typegen/cli.mjs
Normal file
65
@xen-orchestra/xapi-typegen/cli.mjs
Normal file
@@ -0,0 +1,65 @@
|
||||
import { readFileSync } from 'fs'

import { genTs } from './_genTs.mjs'
import { updateSchema } from './_updateSchema.mjs'

const upperCamelCase = s =>
  s
    .split(/[^a-zA-Z]+/)
    .map(s => s[0].toUpperCase() + s.slice(1).toLocaleLowerCase())
    .join('')

const objects = JSON.parse(readFileSync('./objects.json'))
for (const type of Object.keys(objects).sort()) {
  const schema = {
    __proto__: null,

    title: upperCamelCase(type),
    type: 'object',
    properties: {
      assigned_ips: {
        additionalProperties: {},
      },
      bios_strings: {
        additionalProperties: {},
      },
      features: {
        additionalProperties: {},
      },
      license_params: {
        additionalProperties: {},
      },
      networks: {
        additionalProperties: {},
      },
      other_config: {
        additionalProperties: {},
      },
      other: {
        additionalProperties: {},
      },
      restrictions: {
        additionalProperties: {},
      },
      sm_config: {
        additionalProperties: {},
      },
      xenstore_data: {
        additionalProperties: {},
      },
      VCPUs_utilisation: {
        additionalProperties: {},
      },
    },
  }
  for (const object of Object.values(objects[type])) {
    updateSchema(object, schema)
  }
  for (const name of Object.keys(schema.properties)) {
    if (schema.properties[name].type === undefined) {
      delete schema.properties[name]
    }
  }

  console.log(genTs(schema))
}
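As a quick illustration of the naming rule used for `title` above, `upperCamelCase` splits on runs of non-letters and recapitalizes each fragment:

```js
const upperCamelCase = s =>
  s
    .split(/[^a-zA-Z]+/)
    .map(s => s[0].toUpperCase() + s.slice(1).toLocaleLowerCase())
    .join('')

console.log(upperCamelCase('VM_guest_metrics')) // 'VmGuestMetrics'
console.log(upperCamelCase('host')) // 'Host'
```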
20
@xen-orchestra/xapi-typegen/package.json
Normal file
@@ -0,0 +1,20 @@
{
  "private": true,
  "name": "@xen-orchestra/xapi-typegen",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi-typegen",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/xapi-typegen",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "AGPL-3.0-or-later",
  "version": "0.0.0",
  "engines": {
    "node": ">=16.9"
  }
}
@@ -1,6 +1,6 @@
{
  "name": "@xen-orchestra/xapi",
  "version": "1.2.0",
  "version": "1.4.2",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
@@ -15,7 +15,7 @@
    "node": ">=14"
  },
  "peerDependencies": {
    "xen-api": "^1.2.1"
    "xen-api": "^1.2.2"
  },
  "scripts": {
    "postversion": "npm publish --access public"
@@ -26,9 +26,10 @@
    "@xen-orchestra/log": "^0.3.0",
    "d3-time-format": "^3.0.0",
    "golike-defer": "^0.5.1",
    "json-rpc-protocol": "^0.13.2",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.21.0",
    "vhd-lib": "^3.2.0",
    "vhd-lib": "^4.0.0",
    "xo-common": "^0.8.0"
  },
  "private": false,

@@ -4,7 +4,8 @@ const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState } = require('xo-common/api-errors')
const { VDI_FORMAT_RAW } = require('./index.js')
const { VDI_FORMAT_VHD } = require('./index.js')
const assert = require('node:assert').strict
const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')

const AggregateError = require('./_AggregateError.js')
@@ -150,12 +151,26 @@ class Sr {
    $defer,
    ref,
    stream,
    { name_label = '[XO] Imported disk - ' + new Date().toISOString(), ...vdiCreateOpts } = {}
    {
      format = VDI_FORMAT_VHD,
      name_label = '[XO] Imported disk - ' + new Date().toISOString(),
      virtual_size,
      ...vdiCreateOpts
    } = {}
  ) {
    const footer = await peekFooterFromStream(stream)
    const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size: footer.currentSize })
    if (virtual_size === undefined) {
      if (format === VDI_FORMAT_VHD) {
        const footer = await peekFooterFromStream(stream)
        virtual_size = footer.currentSize
      } else {
        virtual_size = stream.length
        assert.notEqual(virtual_size, undefined)
      }
    }

    const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size })
    $defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
    await this.VDI_importContent(vdiRef, stream, { format: VDI_FORMAT_RAW })
    await this.VDI_importContent(vdiRef, stream, { format })
    return vdiRef
  }
}

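The reworked `importVdi` defaults to the VHD format and only peeks at the VHD footer when `virtual_size` is not given; raw streams must instead carry a `length`. A hedged sketch of the two call shapes (the `xapi` handle, SR ref and streams are hypothetical, and the `SR_importVdi` name assumes the usual `<Class>_<method>` exposure seen elsewhere in this diff, e.g. `VDI_create`):

```js
// VHD stream: virtual_size is deduced from the stream's footer.
const vhdVdiRef = await xapi.SR_importVdi(srRef, vhdStream)

// Raw stream: give it a length (or pass virtual_size explicitly).
rawStream.length = rawDiskSizeInBytes
const rawVdiRef = await xapi.SR_importVdi(srRef, rawStream, { format: VDI_FORMAT_RAW })
```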
@@ -1,5 +1,6 @@
'use strict'

const assert = require('node:assert').strict
const CancelToken = require('promise-toolbox/CancelToken')
const pCatch = require('promise-toolbox/catch')
const pRetry = require('promise-toolbox/retry')
@@ -86,6 +87,8 @@ class Vdi {
  }

  async importContent(ref, stream, { cancelToken = CancelToken.none, format }) {
    assert.notEqual(format, undefined)

    if (stream.length === undefined) {
      throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
    }

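`importContent` now rejects streams without a known `length`. When the source is a local file, the caller can attach one from `fs.stat`, as in this sketch (an assumption of mine, not code from this diff):

```js
const { createReadStream } = require('node:fs')
const { stat } = require('node:fs/promises')

// Hypothetical helper: gives a file stream the `length` property
// that importContent checks before streaming to XAPI.
async function streamWithLength(path) {
  const stream = createReadStream(path)
  stream.length = (await stat(path)).size
  return stream
}
```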
@@ -12,6 +12,7 @@ const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
const { JsonRpcError } = require('json-rpc-protocol')
const { Ref } = require('xen-api')

const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -509,6 +510,22 @@ class Vm {
      }
      return ref
    } catch (error) {
      if (
        // xxhash is the new form of consistency hashing in CH 8.1: it uses a faster,
        // more efficient hashing algorithm to generate the consistency checks,
        // so that larger files can be checked without the process taking an incredibly long time
        error.code === 'IMPORT_ERROR' &&
        error.params?.some(
          param =>
            param.includes('INTERNAL_ERROR') &&
            param.includes('Expected to find an inline checksum') &&
            param.includes('.xxhash')
        )
      ) {
        warn('import', { error })
        throw new JsonRpcError('Importing this VM requires XCP-ng or Citrix Hypervisor >=8.1')
      }

      // augment the error with as much relevant info as possible
      const [poolMaster, sr] = await Promise.all([
        safeGetRecord(this, 'host', this.pool.master),

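The condition added above can be read as a standalone predicate; this is a restatement of the diff's logic, not a separate API:

```js
// True when XAPI rejected an XVA import because the stream uses the xxhash
// inline checksums introduced in Citrix Hypervisor / XCP-ng 8.1.
const isXxhashImportError = error =>
  error.code === 'IMPORT_ERROR' &&
  error.params?.some(
    param =>
      param.includes('INTERNAL_ERROR') &&
      param.includes('Expected to find an inline checksum') &&
      param.includes('.xxhash')
  )
```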
154
CHANGELOG.md
@@ -1,11 +1,161 @@
# ChangeLog

## **5.71.1 (2022-06-13)**
## **5.74.0** (2022-08-31)

<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />

### Enhancements

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Home/Storage] Show which SRs are used for HA state files [#6339](https://github.com/vatesfr/xen-orchestra/issues/6339) (PR [#6384](https://github.com/vatesfr/xen-orchestra/pull/6384))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [Backup/Restore] Fix backup list not loading on page load (PR [#6364](https://github.com/vatesfr/xen-orchestra/pull/6364))
- [Host] Fix `should not contains property ["ignoreBackup"]` on some host operations (PR [#6362](https://github.com/vatesfr/xen-orchestra/pull/6362))

### Packages to release

- @xen-orchestra/fs 3.0.0
- vhd-lib 4.0.0
- @xen-orchestra/backups 0.27.4
- @xen-orchestra/backups-cli 0.7.7
- @xen-orchestra/xapi 1.4.2
- xen-api 1.2.2
- @xen-orchestra/proxy 0.26.0
- vhd-cli 0.9.1
- xo-vmdk-to-vhd 2.4.3
- xo-server 5.101.0
- xo-web 5.102.0

## **5.73.1** (2022-08-04)

<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />

### Bug fixes

- [Backup] Fix `incorrect backup size in metadata` on each merged VHD (PR [#6331](https://github.com/vatesfr/xen-orchestra/pull/6331))
- [Backup] Fix `assertionError [ERR_ASSERTION]: Expected values to be strictly equal` when resuming a merge (PR [#6349](https://github.com/vatesfr/xen-orchestra/pull/6349))

### Released packages

- @xen-orchestra/backups 0.27.3
- @xen-orchestra/fs 2.1.0
- @xen-orchestra/mixins 0.7.1
- @xen-orchestra/proxy 0.25.1
- vhd-cli 0.9.0
- vhd-lib 3.3.5
- xo-server 5.100.1
- xo-server-auth-saml 0.10.0
- xo-web 5.101.1

## **5.73.0** (2022-07-29)

### Highlights

- [REST API] VDI import now also supports the raw format
- HTTPS server can acquire an SSL certificate from Let's Encrypt (PR [#6320](https://github.com/vatesfr/xen-orchestra/pull/6320))

### Enhancements

- Embedded HTTP/HTTPS proxy is now enabled by default
- [VM] Display a confirmation modal when stopping/restarting a protected VM (PR [#6295](https://github.com/vatesfr/xen-orchestra/pull/6295))

### Bug fixes

- [Home/VM] Show error when deleting VMs failed (PR [#6323](https://github.com/vatesfr/xen-orchestra/pull/6323))
- [REST API] Fix broken VDI after VHD import [#6327](https://github.com/vatesfr/xen-orchestra/issues/6327) (PR [#6326](https://github.com/vatesfr/xen-orchestra/pull/6326))
- [Netbox] Fix `ipaddr: the address has neither IPv6 nor IPv4 format` error (PR [#6328](https://github.com/vatesfr/xen-orchestra/pull/6328))

### Released packages

- @vates/async-each 1.0.0
- @xen-orchestra/fs 2.0.0
- @xen-orchestra/backups 0.27.2
- @xen-orchestra/backups-cli 0.7.6
- @xen-orchestra/mixins 0.7.0
- @xen-orchestra/xapi 1.4.1
- @xen-orchestra/proxy 0.25.0
- vhd-cli 0.8.1
- vhd-lib 3.3.4
- xo-cli 0.14.1
- xo-server 5.100.0
- xo-web 5.101.0

## **5.72.1** (2022-07-11)

### Enhancements

- [SR] When SR is in maintenance, add "Maintenance mode" badge next to its name (PR [#6313](https://github.com/vatesfr/xen-orchestra/pull/6313))

### Bug fixes

- [Tasks] Fix tasks not displayed when running CR backup job [Forum#6038](https://xcp-ng.org/forum/topic/6038/not-seeing-tasks-any-more-as-admin) (PR [#6315](https://github.com/vatesfr/xen-orchestra/pull/6315))
- [Backup] Fix failure when merging multiple VHDs at once (PR [#6317](https://github.com/vatesfr/xen-orchestra/pull/6317))
- [VM/Console] Fix _Connect with SSH/RDP_ when address is IPv6
- [Audit] Ignore side-effect-free API methods `xoa.check`, `xoa.clearCheckCache` and `xoa.getHVSupportedVersions`

### Released packages

- @xen-orchestra/backups 0.27.0
- @xen-orchestra/backups-cli 0.7.5
- @xen-orchestra/proxy 0.23.5
- vhd-lib 3.3.2
- xo-server 5.98.1
- xo-server-audit 0.10.0
- xo-web 5.100.0

## **5.72.0** (2022-06-30)

### Highlights

- [Backup] Merge delta backups without copying data when using VHD directories on NFS/SMB/local remote (PR [#6271](https://github.com/vatesfr/xen-orchestra/pull/6271))
- [Proxies] Ability to copy the proxy access URL (PR [#6287](https://github.com/vatesfr/xen-orchestra/pull/6287))
- [SR/Advanced] Ability to enable/disable _Maintenance Mode_ [#6215](https://github.com/vatesfr/xen-orchestra/issues/6215) (PRs [#6308](https://github.com/vatesfr/xen-orchestra/pull/6308), [#6297](https://github.com/vatesfr/xen-orchestra/pull/6297))
- [User] User tokens management through XO interface (PR [#6276](https://github.com/vatesfr/xen-orchestra/pull/6276))
- [Tasks, VM/General] Self Service users: show tasks related to their pools, hosts, SRs, networks and VMs (PR [#6217](https://github.com/vatesfr/xen-orchestra/pull/6217))

### Enhancements

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Backup/Restore] Clearer error message when importing a VM backup requires XCP-ng/CH >= 8.1 (PR [#6304](https://github.com/vatesfr/xen-orchestra/pull/6304))
- [Backup] Users can use VHD directory on any remote type (PR [#6273](https://github.com/vatesfr/xen-orchestra/pull/6273))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [VDI Import] Fix `this._getOrWaitObject is not a function`
- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
- [OVA Import] Fix import stuck after first disk
- [File restore] Ignore symbolic links

### Released packages

- @vates/event-listeners-manager 1.0.1
- @vates/read-chunk 1.0.0
- @xen-orchestra/backups 0.26.0
- @xen-orchestra/backups-cli 0.7.4
- xo-remote-parser 0.9.1
- @xen-orchestra/fs 1.1.0
- @xen-orchestra/openflow 0.1.2
- @xen-orchestra/xapi 1.4.0
- @xen-orchestra/proxy 0.23.4
- @xen-orchestra/proxy-cli 0.3.1
- vhd-lib 3.3.1
- vhd-cli 0.8.0
- xo-vmdk-to-vhd 2.4.2
- xo-server 5.98.0
- xo-web 5.99.0

## **5.71.1 (2022-06-13)**

### Enhancements

- Show raw errors to administrators instead of _unknown error from the peer_ (PR [#6260](https://github.com/vatesfr/xen-orchestra/pull/6260))

### Bug fixes

@@ -114,8 +264,6 @@

## 5.70.0 (2022-04-29)

<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />

### Highlights

- [VM export] Feat export to `ova` format (PR [#6006](https://github.com/vatesfr/xen-orchestra/pull/6006))

@@ -7,16 +7,14 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Backup] Merge delta backups without copying data when using VHD directories on NFS/SMB/local remote (PR [#6271](https://github.com/vatesfr/xen-orchestra/pull/6271))
- [Dashboard/Health] Detect broken VHD chains and display missing parent VDIs (PR [#6356](https://github.com/vatesfr/xen-orchestra/pull/6356))
- [Proxy] Ability to bind a licence to an existing proxy (PR [#6348](https://github.com/vatesfr/xen-orchestra/pull/6348))
- [Backup] Implement encryption for backup files on storage (PR [#6321](https://github.com/vatesfr/xen-orchestra/pull/6321))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [VDI Import] Fix `this._getOrWaitObject is not a function`
- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
- [OVA Import] Fix import stuck after first disk

### Packages to release

> When modifying a package, add it here with its release type.

@@ -33,13 +31,10 @@

<!--packages-start-->

- @vates/event-listeners-manager patch
- @vates/read-chunk major
- @xen-orchestra/backups minor
- @xen-orchestra/xapi minor
- vhd-lib minor
- xo-remote-parser minor
- @xen-orchestra/fs minor
- @xen-orchestra/mixins minor
- vhd-lib patch
- xo-server minor
- xo-vmdk-to-vhd patch
- xo-web minor

<!--packages-end-->

10
SECURITY.md
@@ -4,11 +4,11 @@

We apply patches and fix security issues for the following versions:

| Version | Supported |
| ------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| Version | Supported |
| --------------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| anything else | :x: |

## Reporting a Vulnerability

@@ -109,10 +109,13 @@ As a temporary workaround you can increase the timeout higher than the default v

:::

Create the following file:

```
/etc/xo-server/config.httpInactivityTimeout.toml
```

Add the following lines:

```
# XOA Support - Work-around HTTP timeout issue during backups
[xapiOptions]

@@ -24,16 +24,15 @@ Please, do explain:

The best way to propose a change to the documentation or code is
to create a [GitHub pull request](https://help.github.com/articles/using-pull-requests/).

:::tip
Your pull request should always be against the `master` branch and not against `stable`, which is the stable release branch!
:::

1. Create a branch for your work
2. Add a summary of your changes to `CHANGELOG.md` under the `next` section, if your changes do not relate to an existing changelog item
3. Create a pull request for this branch against the `master` branch
4. Push into the branch until the pull request is ready to merge
5. Avoid unnecessary merges: keep your branch up to date by regularly rebasing `git rebase origin/master`
6. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
1. Fork the [Xen Orchestra repository](https://github.com/vatesfr/xen-orchestra) using the Fork button
2. Follow [the documentation](installation.md#from-the-sources) to install and run Xen Orchestra from the sources
3. Create a branch for your work
4. Edit the source files
5. Add a summary of your changes to `CHANGELOG.unreleased.md`, if your changes do not relate to an existing changelog item, and update the list of packages that must be released to take your changes into account
6. [Create a pull request](https://github.com/vatesfr/xen-orchestra/compare) for this branch against the `master` branch
7. Push into the branch until the pull request is ready to merge
8. Avoid unnecessary merges: keep your branch up to date by regularly rebasing `git rebase origin/master`
9. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`

### Issue triage

@@ -143,14 +143,14 @@ curl \

## VDI Import

A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.
A VHD or a raw export can be imported on an SR to create a new VDI at `/rest/v0/srs/<sr uuid>/vdis`.

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
  -T myDisk.raw \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?raw&name_label=my_imported_VDI' \
  | cat
```

@@ -162,6 +162,7 @@ The following query parameters are supported to customize the created VDI:

- `name_label`
- `name_description`
- `raw`: this parameter must be used if importing a raw export instead of a VHD

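For comparison, a VHD import (the default) uses the same endpoint without the `raw` flag, with the same hypothetical token and SR UUID as above:

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
  | cat
```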
## The future

@@ -15,7 +15,7 @@ Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.

:::warning
As VxLAN and GRE are protocols using extra encapsulation, they require extra bits on a network packet. If you create a Global Private Network with a default MTU at `1500`, you won't be able to use it "as is" in your VMs, unless you configure a smaller MTU for each virtual interface, in your VM operating system (eg: `1400`).

If you want something entirely transparent for your VMs, then you'll need to create a network with a MTU of `1546` for GRE or `1550` for VxLAN. However, larger MTU will require capable network equipements.
If you want something entirely transparent for your VMs, then you'll need to create a network with an MTU of `1546` for GRE or `1550` for VxLAN. However, a larger MTU will require capable network equipment.
:::

### Network creation

17
docs/xoa.md
@@ -206,6 +206,23 @@ In any case, if you lose your password, you can reset the database and get the d

You can verify that your time is correctly set with the `date` command. To set XOA to your current timezone, use `sudo dpkg-reconfigure tzdata`.

## Setting a custom NTP server

By default, XOA is configured to use the standard Debian NTP servers:

```
pool 0.debian.pool.ntp.org iburst
pool 1.debian.pool.ntp.org iburst
pool 2.debian.pool.ntp.org iburst
pool 3.debian.pool.ntp.org iburst
```

If you'd like to use your own NTP server or another pool, you can make the changes directly in `/etc/ntp.conf`.

You will need to be root to edit this file (or use `sudo`). We recommend adding your custom server to the top of the list, leaving the Debian server entries if possible.

For changes to take effect, you will need to restart NTP: `sudo systemctl restart ntp.service`.

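For example, to prefer your own server while keeping the Debian pools as fallback, the top of `/etc/ntp.conf` could look like this (the hostname is hypothetical):

```
server ntp.example.lan iburst
pool 0.debian.pool.ntp.org iburst
pool 1.debian.pool.ntp.org iburst
```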
## Restart the service

You can restart Xen Orchestra by accessing XOA via SSH (or console) and running `systemctl restart xo-server.service`.

@@ -3,7 +3,7 @@
  "@babel/core": "^7.0.0",
  "@babel/eslint-parser": "^7.13.8",
  "@babel/register": "^7.0.0",
  "babel-jest": "^27.3.1",
  "babel-jest": "^28.1.2",
  "benchmark": "^2.1.4",
  "deptree": "^1.0.0",
  "eslint": "^8.7.0",
@@ -19,8 +19,8 @@
  "globby": "^13.1.1",
  "handlebars": "^4.7.6",
  "husky": "^4.2.5",
  "jest": "^27.3.1",
  "lint-staged": "^12.0.3",
  "jest": "^28.1.2",
  "lint-staged": "^13.0.3",
  "lodash": "^4.17.4",
  "prettier": "^2.0.5",
  "promise-toolbox": "^0.21.0",
@@ -77,7 +77,7 @@
  "private": true,
  "scripts": {
    "build": "scripts/run-script.js --parallel build",
    "ci": "yarn && yarn test-integration",
    "ci": "yarn && yarn build && yarn test-integration",
    "clean": "scripts/run-script.js --parallel clean",
    "dev": "scripts/run-script.js --parallel dev",
    "dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",

@@ -253,7 +253,7 @@ class RegExpNode extends Node {
    return this.re.toString()
  }
}
exports.RegExp = RegExpNode
exports.RegExp = exports.RegExpNode = RegExpNode

class StringNode extends Node {
  constructor(value) {

@@ -1,3 +0,0 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js
@@ -1,3 +1,5 @@
'use strict'

const { createWriteStream } = require('fs')
const { PassThrough } = require('stream')

@@ -12,7 +14,7 @@ const createOutputStream = path => {
  return stream
}

export const writeStream = (input, path) => {
exports.writeStream = function writeStream(input, path) {
  const output = createOutputStream(path)

  return new Promise((resolve, reject) =>

@@ -1,11 +1,13 @@
import { VhdFile, checkVhdChain } from 'vhd-lib'
import getopts from 'getopts'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
'use strict'

const { VhdFile, checkVhdChain } = require('vhd-lib')
const getopts = require('getopts')
const { getHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')

const checkVhd = (handler, path) => new VhdFile(handler, path).readHeaderAndFooter()

export default async rawArgs => {
module.exports = async function check(rawArgs) {
  const { chain, _: args } = getopts(rawArgs, {
    boolean: ['chain'],
    default: {

@@ -1,9 +1,11 @@
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, Constants } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import omit from 'lodash/omit'
'use strict'

const deepCompareObjects = function (src, dest, path) {
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { openVhd, Constants } = require('vhd-lib')
const Disposable = require('promise-toolbox/Disposable')
const omit = require('lodash/omit')

function deepCompareObjects(src, dest, path) {
  for (const key of Object.keys(src)) {
    const srcValue = src[key]
    const destValue = dest[key]
@@ -29,7 +31,7 @@ const deepCompareObjects = function (src, dest, path) {
  }
}

export default async args => {
module.exports = async function compare(args) {
  if (args.length < 4 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: compare <sourceRemoteUrl> <source VHD> <destinationRemoteUrl> <destination>`
  }

@@ -1,9 +1,11 @@
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, VhdFile, VhdDirectory } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import getopts from 'getopts'
'use strict'

export default async rawArgs => {
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { openVhd, VhdFile, VhdDirectory } = require('vhd-lib')
const Disposable = require('promise-toolbox/Disposable')
const getopts = require('getopts')

module.exports = async function copy(rawArgs) {
  const {
    directory,
    help,
43
packages/vhd-cli/commands/index.js
Normal file
@@ -0,0 +1,43 @@
//
// This file has been generated by [index-modules](https://npmjs.com/index-modules)
//

var d = Object.defineProperty
function de(o, n, v) {
  d(o, n, { enumerable: true, value: v })
  return v
}
function dl(o, n, g, a) {
  d(o, n, {
    configurable: true,
    enumerable: true,
    get: function () {
      return de(o, n, g(a))
    },
  })
}
function r(p) {
  var v = require(p)
  return v && v.__esModule
    ? v
    : typeof v === 'object' || typeof v === 'function'
    ? Object.create(v, { default: { enumerable: true, value: v } })
    : { default: v }
}
function e(p, i) {
  dl(defaults, i, function () {
    return exports[i].default
  })
  dl(exports, i, r, p)
}

d(exports, '__esModule', { value: true })
var defaults = de(exports, 'default', {})
e('./check.js', 'check')
e('./compare.js', 'compare')
e('./copy.js', 'copy')
e('./info.js', 'info')
e('./merge.js', 'merge')
e('./raw.js', 'raw')
e('./repl.js', 'repl')
e('./synthetize.js', 'synthetize')
@@ -1,9 +1,13 @@
import { Constants, VhdFile } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
import * as UUID from 'uuid'
import humanFormat from 'human-format'
import invert from 'lodash/invert.js'
'use strict'

const { Constants, VhdFile } = require('vhd-lib')
const { getHandler } = require('@xen-orchestra/fs')
const { openVhd } = require('vhd-lib/openVhd')
const { resolve } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const humanFormat = require('human-format')
const invert = require('lodash/invert.js')
const UUID = require('uuid')

const { PLATFORMS } = Constants

@@ -32,8 +36,8 @@ function mapProperties(object, mapping) {
  return result
}

export default async args => {
  const vhd = new VhdFile(getHandler({ url: 'file:///' }), resolve(args[0]))
async function showDetails(handler, path) {
  const vhd = new VhdFile(handler, resolve(path))

  try {
    await vhd.readHeaderAndFooter()
@@ -43,6 +47,7 @@ export default async args => {
  }

  console.log(
    'footer:',
    mapProperties(vhd.footer, {
      currentSize: 'bytes',
      diskType: 'diskType',
@@ -53,6 +58,7 @@ export default async args => {
  )

  console.log(
    'header:',
    mapProperties(vhd.header, {
      blockSize: 'bytes',
      parentTimestamp: 'date',
@@ -67,3 +73,29 @@ export default async args => {
    })
  )
}

async function showList(handler, paths) {
  let previousUuid
  for (const path of paths) {
    await Disposable.use(openVhd(handler, resolve(path)), async vhd => {
      const uuid = MAPPERS.uuid(vhd.footer.uuid)
      const fields = [path, MAPPERS.bytes(vhd.footer.currentSize), uuid, MAPPERS.diskType(vhd.footer.diskType)]
      if (vhd.footer.diskType === Constants.DISK_TYPES.DIFFERENCING) {
        const parentUuid = MAPPERS.uuid(vhd.header.parentUuid)
        fields.push(parentUuid === previousUuid ? '<above VHD>' : parentUuid)
      }
      previousUuid = uuid
      console.log(fields.join(' | '))
    })
  }
}

module.exports = async function info(args) {
  const handler = getHandler({ url: 'file:///' })

  if (args.length === 1) {
    return showDetails(handler, args[0])
  }

  return showList(handler, args)
}
@@ -1,16 +1,18 @@
import { Bar } from 'cli-progress'
import { mergeVhd } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
'use strict'

export default async function main(args) {
const { Bar } = require('cli-progress')
const { getHandler } = require('@xen-orchestra/fs')
const { mergeVhdChain } = require('vhd-lib/merge')
const { resolve } = require('path')

module.exports = async function merge(args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <child VHD> <parent VHD>`
  }

  const handler = getHandler({ url: 'file:///' })
  let bar
  await mergeVhd(handler, resolve(args[1]), handler, resolve(args[0]), {
  await mergeVhdChain(handler, [resolve(args[1]), resolve(args[0])], {
    onProgress({ done, total }) {
      if (bar === undefined) {
        bar = new Bar({
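Note the argument order: the CLI usage is `<child VHD> <parent VHD>`, while `mergeVhdChain` takes the chain ordered from parent to child, hence the swap to `[resolve(args[1]), resolve(args[0])]`. This matches the way the tests later in this diff call it:

```js
await mergeVhdChain(handler, [parentFileName, childFileName])
```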
@@ -1,11 +1,13 @@
import { openVhd } from 'vhd-lib'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
'use strict'

import { writeStream } from '../_utils'
import { Disposable } from 'promise-toolbox'
const { openVhd } = require('vhd-lib')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')

export default async args => {
const { writeStream } = require('../_utils')
const { Disposable } = require('promise-toolbox')

module.exports = async function raw(args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> [<output raw>]`
  }
@@ -1,10 +1,12 @@
import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { relative } from 'path'
import { start as createRepl } from 'repl'
import * as vhdLib from 'vhd-lib'
'use strict'

export default async args => {
const { asCallback, fromCallback, fromEvent } = require('promise-toolbox')
const { getHandler } = require('@xen-orchestra/fs')
const { relative } = require('path')
const { start: createRepl } = require('repl')
const vhdLib = require('vhd-lib')

module.exports = async function repl(args) {
  const cwd = process.cwd()
  const handler = getHandler({ url: 'file://' + cwd })
  await handler.sync()
@@ -1,9 +1,11 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'
'use strict'

export default async function main(args) {
const path = require('path')
const { createSyntheticStream } = require('vhd-lib')
const { createWriteStream } = require('fs')
const { getHandler } = require('@xen-orchestra/fs')

module.exports = async function synthetize(args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> <output VHD>`
  }
@@ -1,10 +1,11 @@
#!/usr/bin/env node

import execPromise from 'exec-promise'
'use strict'

import pkg from '../package.json'
const execPromise = require('exec-promise')

import commands from './commands'
const pkg = require('./package.json')
const commands = require('./commands').default

function runCommand(commands, [command, ...args]) {
  if (command === undefined || command === '-h' || command === '--help') {
@@ -1,11 +1,13 @@
'use strict'

/* eslint-env jest */

import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
const execa = require('execa')
const rimraf = require('rimraf')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')

import command from './commands/info'
const command = require('./commands/info')

const initialDir = process.cwd()

@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "vhd-cli",
  "version": "0.7.2",
  "version": "0.9.1",
  "license": "ISC",
  "description": "Tools to read/create and merge VHD files",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -16,40 +16,29 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {
    "vhd-cli": "dist/index.js"
    "vhd-cli": "./index.js"
  },
  "engines": {
    "node": ">=8.10"
    "node": ">=10"
  },
  "dependencies": {
    "@xen-orchestra/fs": "^1.0.3",
    "@xen-orchestra/fs": "^3.0.0",
    "cli-progress": "^3.1.0",
    "exec-promise": "^0.7.0",
    "getopts": "^2.2.3",
    "human-format": "^1.0.0",
    "lodash": "^4.17.21",
    "uuid": "^8.3.2",
    "vhd-lib": "^3.2.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^7.0.2",
    "execa": "^5.0.0",
    "index-modules": "^0.4.3",
    "promise-toolbox": "^0.21.0",
    "rimraf": "^3.0.0",
    "tmp": "^0.2.1"
    "uuid": "^8.3.2",
    "vhd-lib": "^4.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  },
  "devDependencies": {
    "execa": "^4.0.0",
    "rimraf": "^3.0.2",
    "tmp": "^0.2.1"
  }
}

@@ -86,10 +86,19 @@ exports.VhdAbstract = class VhdAbstract {
  }

  /**
   * @typedef {Object} BitmapBlock
   * @property {number} id
   * @property {Buffer} bitmap
   *
   * @typedef {Object} FullBlock
   * @property {number} id
   * @property {Buffer} bitmap
   * @property {Buffer} data
   * @property {Buffer} buffer - bitmap + data
   *
   * @param {number} blockId
   * @param {boolean} onlyBitmap
   * @returns {Buffer}
   * @returns {Promise<BitmapBlock | FullBlock>}
   */
  readBlock(blockId, onlyBitmap = false) {
    throw new Error(`reading ${onlyBitmap ? 'bitmap of block' : 'block'} ${blockId} is not implemented`)
@@ -334,4 +343,21 @@ exports.VhdAbstract = class VhdAbstract {
    stream.length = footer.currentSize
    return stream
  }

  async containsAllDataOf(child) {
    await this.readBlockAllocationTable()
    await child.readBlockAllocationTable()
    for await (const block of child.blocks()) {
      const { id, data: childData } = block
      // block is in child but not in parent
      if (!this.containsBlock(id)) {
        return false
      }
      const { data: parentData } = await this.readBlock(id)
      if (!childData.equals(parentData)) {
        return false
      }
    }
    return true
  }
}

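`containsAllDataOf` makes it possible to detect a child VHD that adds nothing over its parent. A hedged sketch of a possible caller (the `parentVhd`/`childVhd` handles are hypothetical, not from this diff):

```js
// If every block of `child` already exists with identical content in
// `parent`, the child carries no extra data and could be dropped.
if (await parentVhd.containsAllDataOf(childVhd)) {
  console.log('child VHD is redundant')
}
```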
@@ -6,6 +6,7 @@ const { fuFooter, fuHeader, checksumStruct } = require('../_structs')
const { test, set: setBitmap } = require('../_bitmap')
const { VhdAbstract } = require('./VhdAbstract')
const assert = require('assert')
const { synchronized } = require('decorator-synchronized')
const promisify = require('promise-toolbox/promisify')
const zlib = require('zlib')

@@ -117,7 +118,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
  }

  static async create(handler, path, { flags = 'wx+', compression } = {}) {
    await handler.mkdir(path)
    await handler.mktree(path)
    const vhd = new VhdDirectory(handler, path, { flags, compression })
    return {
      dispose: () => {},
@@ -131,6 +132,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
    this._path = path
    this._opts = opts
    this.#compressor = getCompressor(opts?.compression)
    this.writeBlockAllocationTable = synchronized()(this.writeBlockAllocationTable)
  }

  async readBlockAllocationTable() {
@@ -163,8 +165,10 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
      `Can't write a chunk ${partName} in ${this._path} with read permission`
    )

    // in case of VhdDirectory, we want to create the file if it does not exist
    const flags = this._opts?.flags === 'r+' ? 'w' : this._opts?.flags
    const compressed = await this.#compressor.compress(buffer)
    return this._handler.outputFile(this.#getChunkPath(partName), compressed, this._opts)
    return this._handler.outputFile(this.#getChunkPath(partName), compressed, { flags })
  }

  // put blocks in subdirectories to limit the impact when doing a directory listing
@@ -249,7 +253,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
  async mergeBlock(child, blockId, isResumingMerge = false) {
    const childBlockPath = child._getFullBlockPath?.(blockId)
    if (
      childBlockPath !== undefined ||
      childBlockPath === undefined ||
      this._handler !== child._handler ||
      child.compressionType !== this.compressionType ||
      child.compressionType === 'MIXED'
@@ -257,7 +261,12 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
      return super.mergeBlock(child, blockId)
    }
    try {
      const blockExists = this.containsBlock(blockId)
      await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
      if (!blockExists) {
        setBitmap(this.#blockTable, blockId)
        await this.writeBlockAllocationTable()
      }
    } catch (error) {
      if (error.code === 'ENOENT' && isResumingMerge === true) {
        // when resuming, the blocks moved since the last merge state write are

@@ -72,7 +72,7 @@ test('blocks can be moved', async () => {
  await newVhd._freeFirstBlockSpace(8000000)
  const recoveredFileName = `${tempDir}/recovered`
  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
  expect((await fs.readFile(recoveredFileName)).equals(await fs.readFile(rawFileName))).toEqual(true)
})

test('the BAT MSB is not used for sign', async () => {
@@ -116,7 +116,7 @@ test('the BAT MSB is not used for sign', async () => {
      end: hugePositionBytes + randomBuffer.length - 1,
    })
  )
  expect(recovered).toEqual(randomBuffer)
  expect(recovered.equals(randomBuffer)).toEqual(true)
})

test('writeData on empty file', async () => {
@@ -134,7 +134,7 @@ test('writeData on empty file', async () => {
  await newVhd.writeData(0, randomData)
  const recoveredFileName = `${tempDir}/recovered`
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
  expect((await fs.readFile(recoveredFileName)).equals(randomData)).toEqual(true)
})

test('writeData in 2 non-overlaping operations', async () => {
@@ -154,7 +154,7 @@ test('writeData in 2 non-overlaping operations', async () => {
  await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
  await newVhd.writeData(splitPointSectors, randomData.slice(splitPointSectors * 512))
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
  expect((await fs.readFile(recoveredFileName)).equals(randomData)).toEqual(true)
})

test('writeData in 2 overlaping operations', async () => {
@@ -175,7 +175,7 @@ test('writeData in 2 overlaping operations', async () => {
  await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
  await newVhd.writeData(startSecondWrite, randomData.slice(startSecondWrite * 512))
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
  expect((await fs.readFile(recoveredFileName)).equals(randomData)).toEqual(true)
})

test('BAT can be extended and blocks moved', async () => {
@@ -193,7 +193,7 @@ test('BAT can be extended and blocks moved', async () => {
  await newVhd.ensureBatSize(2000)
  await newVhd.writeBlockAllocationTable()
  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
  expect((await fs.readFile(recoveredFileName)).equals(await fs.readFile(rawFileName))).toEqual(true)
})

test('Can coalesce block', async () => {
@@ -227,13 +227,13 @@ test('Can coalesce block', async () => {
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData).toEqual(childBlockData)
    expect(parentBlockData.equals(childBlockData)).toEqual(true)

    await parentVhd.mergeBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(0)).data
    childBlockData = (await childDirectoryVhd.readBlock(0)).data
    expect(parentBlockData).toEqual(childBlockData)
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
  })
})

@@ -83,6 +83,7 @@ exports.VhdFile = class VhdFile extends VhdAbstract {
  }

  static async open(handler, path, { flags, checkSecondFooter = true } = {}) {
    assert(!handler.isEncrypted, `VHDFile implementation is not compatible with encrypted remote`)
    const fd = await handler.openFile(path, flags ?? 'r+')
    const vhd = new VhdFile(handler, fd)
    // opening a file for reading does not trigger EISDIR as long as we don't really read from it:

@@ -55,7 +55,7 @@ test('It can read block and parent locator from a synthetic vhd', async () => {

  await bigVhd.readHeaderAndFooter()

  const syntheticVhd = yield VhdSynthetic.open(handler, [smallVhdFileName, bigVhdFileName])
  const syntheticVhd = yield VhdSynthetic.open(handler, [bigVhdFileName, smallVhdFileName])
  await syntheticVhd.readBlockAllocationTable()

  expect(syntheticVhd.header.diskType).toEqual(bigVhd.header.diskType)
@@ -65,25 +65,24 @@ test('It can read block and parent locator from a synthetic vhd', async () => {
  const buf = Buffer.alloc(syntheticVhd.sectorsPerBlock * SECTOR_SIZE, 0)
  let content = (await syntheticVhd.readBlock(0)).data
  await handler.read(smallRawFileName, buf, 0)
  expect(content).toEqual(buf)
  expect(content.equals(buf)).toEqual(true)

  content = (await syntheticVhd.readBlock(1)).data
  await handler.read(smallRawFileName, buf, buf.length)
  expect(content).toEqual(buf)
  expect(content.equals(buf)).toEqual(true)

  // the next one from big

  content = (await syntheticVhd.readBlock(2)).data
  await handler.read(bigRawFileName, buf, buf.length * 2)
  expect(content).toEqual(buf)
  expect(content.equals(buf)).toEqual(true)

  content = (await syntheticVhd.readBlock(3)).data
  await handler.read(bigRawFileName, buf, buf.length * 3)
  expect(content).toEqual(buf)
  expect(content.equals(buf)).toEqual(true)

  // the parent locator should be the one of the root vhd
  const parentLocator = await syntheticVhd.readParentLocator(0)
  expect(parentLocator.platformCode).toEqual(PLATFORMS.W2KU)
  expect(Buffer.from(parentLocator.data, 'utf-8').toString()).toEqual('I am in the big one')
})
})

@@ -15,14 +15,13 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
  #vhds = []

  get header() {
    // this the VHD we want to synthetize
    const vhd = this.#vhds[0]
    // this is the most recent VHD
    const vhd = this.#vhds[this.#vhds.length - 1]

    // this is the root VHD
    const rootVhd = this.#vhds[this.#vhds.length - 1]
    const rootVhd = this.#vhds[0]

    // data of our synthetic VHD
    // TODO: set parentLocatorEntry-s in header
    return {
      ...vhd.header,
      parentLocatorEntry: cloneDeep(rootVhd.header.parentLocatorEntry),
@@ -34,10 +33,13 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
  }

  get footer() {
    // this is the root VHD
    const rootVhd = this.#vhds[this.#vhds.length - 1]
    // this is the most recent VHD
    const vhd = this.#vhds[this.#vhds.length - 1]

    // this is the oldest VHD
    const rootVhd = this.#vhds[0]
    return {
      ...this.#vhds[0].footer,
      ...vhd.footer,
      dataOffset: FOOTER_SIZE,
      diskType: rootVhd.footer.diskType,
    }
@@ -77,17 +79,21 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
    await asyncMap(vhds, vhd => vhd.readHeaderAndFooter())

    for (let i = 0, n = vhds.length - 1; i < n; ++i) {
      const child = vhds[i]
      const parent = vhds[i + 1]
      const parent = vhds[i]
      const child = vhds[i + 1]
      assert.strictEqual(child.footer.diskType, DISK_TYPES.DIFFERENCING)
      assert.strictEqual(UUID.stringify(child.header.parentUuid), UUID.stringify(parent.footer.uuid))
    }
  }

  #getVhdWithBlock(blockId) {
    const index = this.#vhds.findIndex(vhd => vhd.containsBlock(blockId))
    assert(index !== -1, `no such block ${blockId}`)
    return this.#vhds[index]
    for (let i = this.#vhds.length - 1; i >= 0; i--) {
      const vhd = this.#vhds[i]
      if (vhd.containsBlock(blockId)) {
        return vhd
      }
    }
    assert(false, `no such block ${blockId}`)
  }

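With the chain now stored oldest-first (see the `unshift` below), `#getVhdWithBlock` scans from the most recent VHD backwards, so the newest copy of a block wins, which is the expected differencing-disk semantics. A toy illustration of the lookup order (mock objects, not the real classes):

```js
// Mock VHDs ordered oldest → newest; block 0 exists in both.
const chain = [
  { name: 'parent', containsBlock: id => id === 0 },
  { name: 'child', containsBlock: id => id === 0 || id === 1 },
]
const getVhdWithBlock = id => {
  for (let i = chain.length - 1; i >= 0; i--) {
    if (chain[i].containsBlock(id)) return chain[i]
  }
}
console.log(getVhdWithBlock(0).name) // 'child': the most recent copy wins
```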
  async readBlock(blockId, onlyBitmap = false) {
@@ -106,6 +112,11 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
    const vhd = this.#getVhdWithBlock(blockId)
    return vhd?._getFullBlockPath(blockId)
  }

  // returns true if all the VHDs are instances of cls
  checkVhdsClass(cls) {
    return this.#vhds.every(vhd => vhd instanceof cls)
  }
}

// add decorated static method
@@ -115,7 +126,7 @@ VhdSynthetic.fromVhdChain = Disposable.factory(async function* fromVhdChain(hand
  const vhds = []
  do {
    vhd = yield openVhd(handler, vhdPath)
    vhds.push(vhd)
    vhds.unshift(vhd) // from oldest to most recent
    vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
  } while (vhd.footer.diskType !== DISK_TYPES.DYNAMIC)

@@ -12,11 +12,14 @@ exports.resolveVhdAlias = async function resolveVhdAlias(handler, filename) {
  if (!isVhdAlias(filename)) {
    return filename
  }
  const size = await handler.getSize(filename)
  if (size > ALIAS_MAX_PATH_LENGTH) {
    // seems reasonnable for a relative path
    throw new Error(`The alias file ${filename} is too big (${size} bytes)`)
  if (!handler.isEncrypted) {
    const size = await handler.getSize(filename)
    if (size > ALIAS_MAX_PATH_LENGTH) {
      // seems reasonable for a relative path
      throw new Error(`The alias file ${filename} is too big (${size} bytes)`)
    }
  }

  const aliasContent = (await handler.readFile(filename)).toString().trim()
  // also handle circular references and unreasonably long chains
  if (isVhdAlias(aliasContent)) {

@@ -8,7 +8,7 @@ const { Disposable } = require('promise-toolbox')

module.exports = async function chain(parentHandler, parentPath, childHandler, childPath, force = false) {
  await Disposable.use(
    [openVhd(parentHandler, parentPath), openVhd(childHandler, childPath)],
    [openVhd(parentHandler, parentPath), openVhd(childHandler, childPath, { flags: 'r+' })],
    async ([parentVhd, childVhd]) => {
      await childVhd.readHeaderAndFooter()
      const { header, footer } = childVhd

@@ -1,10 +1,13 @@
'use strict'

const { createLogger } = require('@xen-orchestra/log')
const { parseVhdStream } = require('./parseVhdStream.js')
const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')

const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')

const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
  const vhd = yield VhdDirectory.create(handler, path, { compression })
  await asyncEach(
@@ -50,7 +53,7 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
    }
  } catch (error) {
    // cleanup on error
    await handler.rmtree(path)
    await handler.rmtree(path).catch(warn)
    throw error
  }
}

@@ -6,8 +6,6 @@ exports.checkVhdChain = require('./checkChain')
exports.createReadableSparseStream = require('./createReadableSparseStream')
exports.createVhdStreamWithLength = require('./createVhdStreamWithLength')
exports.createVhdDirectoryFromStream = require('./createVhdDirectoryFromStream').createVhdDirectoryFromStream
const { mergeVhd } = require('./merge')
exports.mergeVhd = mergeVhd
exports.peekFooterFromVhdStream = require('./peekFooterFromVhdStream')
exports.openVhd = require('./openVhd').openVhd
exports.VhdAbstract = require('./Vhd/VhdAbstract').VhdAbstract

@@ -8,8 +8,8 @@ const tmp = require('tmp')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')

const { VhdFile, chainVhd, mergeVhd } = require('./index')
const { _cleanupVhds: cleanupVhds } = require('./merge')
const { VhdFile, chainVhd } = require('./index')
const { _cleanupVhds: cleanupVhds, mergeVhdChain } = require('./merge')

const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')

@@ -42,7 +42,7 @@ test('merge works in normal cases', async () => {
  await checkFile(`${tempDir}/${parentFileName}`)

  // merge
  await mergeVhd(handler, parentFileName, handler, child1FileName)
  await mergeVhdChain(handler, [parentFileName, child1FileName])

  // check that the merged vhd is still valid
  await checkFile(`${tempDir}/${child1FileName}`)
@@ -65,7 +65,7 @@ test('merge works in normal cases', async () => {
  }
})

test('it can resume a merge ', async () => {
test('it can resume a simple merge ', async () => {
  const mbOfFather = 8
  const mbOfChildren = 4
  const parentRandomFileName = `${tempDir}/randomfile`
@@ -95,7 +95,7 @@ test('it can resume a merge ', async () => {
    })
  )
  // expect merge to fail since child header is not ok
  await expect(async () => await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()
  await expect(async () => await mergeVhdChain(handler, ['parent.vhd', 'child1.vhd'])).rejects.toThrow()

  await handler.unlink('.parent.vhd.merge.json')
  await handler.writeFile(
@@ -110,7 +110,7 @@ test('it can resume a merge ', async () => {
    })
  )
  // expect merge to fail since parent header is not ok
  await expect(async () => await mergeVhd(handler, 'parent.vhd', handler, ['child1.vhd'])).rejects.toThrow()
  await expect(async () => await mergeVhdChain(handler, ['parent.vhd', 'child1.vhd'])).rejects.toThrow()

  // break the end footer of parent
  const size = await handler.getSize('parent.vhd')
@@ -137,7 +137,7 @@ test('it can resume a merge ', async () => {
  )

  // really merge
  await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')
  await mergeVhdChain(handler, ['parent.vhd', 'child1.vhd'])

  // reload header, footer and block allocation table, they should succeed
  await childVhd.readHeaderAndFooter()
@@ -159,6 +159,73 @@ test('it can resume a simple merge ', async () => {
  }
})

test('it can resume a multiple merge ', async () => {
  const mbOfFather = 8
  const mbOfChildren = 6
  const mbOfGrandChildren = 4
  const parentRandomFileName = `${tempDir}/randomfile`
  const childRandomFileName = `${tempDir}/small_randomfile`
  const grandChildRandomFileName = `${tempDir}/another_small_randomfile`
  const parentFileName = `${tempDir}/parent.vhd`
  const childFileName = `${tempDir}/child.vhd`
  const grandChildFileName = `${tempDir}/grandchild.vhd`
  const handler = getHandler({ url: 'file://' })
  await createRandomFile(parentRandomFileName, mbOfFather)
  await convertFromRawToVhd(parentRandomFileName, parentFileName)

  await createRandomFile(childRandomFileName, mbOfChildren)
  await convertFromRawToVhd(childRandomFileName, childFileName)
  await chainVhd(handler, parentFileName, handler, childFileName, true)

  await createRandomFile(grandChildRandomFileName, mbOfGrandChildren)
  await convertFromRawToVhd(grandChildRandomFileName, grandChildFileName)
  await chainVhd(handler, childFileName, handler, grandChildFileName, true)

  const parentVhd = new VhdFile(handler, parentFileName)
  await parentVhd.readHeaderAndFooter()

  const childVhd = new VhdFile(handler, childFileName)
  await childVhd.readHeaderAndFooter()

  const grandChildVhd = new VhdFile(handler, grandChildFileName)
  await grandChildVhd.readHeaderAndFooter()

  await handler.writeFile(
    `${tempDir}/.parent.vhd.merge.json`,
    JSON.stringify({
      parent: {
        header: parentVhd.header.checksum,
      },
      child: {
        header: childVhd.header.checksum,
      },
      currentBlock: 1,
    })
  )

  // should fail since the merge state file has only data of parent and child
  await expect(
    async () => await mergeVhdChain(handler, [parentFileName, childFileName, grandChildFileName])
  ).rejects.toThrow()
  // merge
  await handler.unlink(`${tempDir}/.parent.vhd.merge.json`)
  await handler.writeFile(
    `${tempDir}/.parent.vhd.merge.json`,
    JSON.stringify({
      parent: {
        header: parentVhd.header.checksum,
      },
      child: {
        header: grandChildVhd.header.checksum,
      },
      currentBlock: 1,
      childPath: [childVhd, grandChildVhd],
    })
  )
  // it should succeed
  await mergeVhdChain(handler, [parentFileName, childFileName, grandChildFileName])
})

test('it merge multiple child in one pass ', async () => {
  const mbOfFather = 8
  const mbOfChildren = 6
@@ -182,7 +249,7 @@ test('it merge multiple child in one pass ', async () => {
  await chainVhd(handler, childFileName, handler, grandChildFileName, true)

  // merge
  await mergeVhd(handler, parentFileName, handler, [grandChildFileName, childFileName])
  await mergeVhdChain(handler, [parentFileName, childFileName, grandChildFileName])

  // check that vhd is still valid
  await checkFile(grandChildFileName)
@@ -217,8 +284,7 @@ test('it cleans vhd mergedfiles', async () => {
  await handler.writeFile('child2', 'child2Data')
  await handler.writeFile('child3', 'child3Data')

  // childPath is from the grand children to the children
  await cleanupVhds(handler, 'parent', ['child3', 'child2', 'child1'], { remove: true })
  await cleanupVhds(handler, ['parent', 'child1', 'child2', 'child3'], { merge: true, removeUnused: true })

  // only child3 should stay, with the data of parent
  const [child3, ...other] = await handler.list('.')

Some files were not shown because too many files have changed in this diff.