Compare commits


2 Commits

Author | SHA1 | Message | Date
Florent Beauchamp | d6d7e87fe5 | fix: remove root need for openVhd.integ.spec.js and merge.integ.spec.js | 2021-11-12 11:24:00 +01:00
Florent Beauchamp | 00f02c795f | feat(vhd-lib): tests shouldn't need root access to run | 2021-11-10 14:06:24 +01:00
99 changed files with 1200 additions and 3141 deletions

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,68 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/async-each
[![Package Version](https://badgen.net/npm/v/@vates/async-each)](https://npmjs.org/package/@vates/async-each) ![License](https://badgen.net/npm/license/@vates/async-each) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/async-each)](https://bundlephobia.com/result?p=@vates/async-each) [![Node compatibility](https://badgen.net/npm/node/@vates/async-each)](https://npmjs.org/package/@vates/async-each)
> Run async fn for each item in (async) iterable
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/async-each):
```
> npm install --save @vates/async-each
```
## Usage
### `asyncEach(iterable, iteratee, [opts])`
Executes `iteratee` in order for each value yielded by `iterable`.
Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.
`iterable` must be an iterable or async iterable.
`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:
- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or to wait for all calls to finish and throw an `AggregateError`, defaults to `true`
```js
import { asyncEach } from '@vates/async-each'
const contents = []
await asyncEach(
['foo.txt', 'bar.txt', 'baz.txt'],
async function (filename, i) {
contents[i] = await readFile(filename)
},
{
// reads two files at a time
concurrency: 2,
}
)
```
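The options can be combined; here is a minimal sketch (not taken from the package itself) of collecting all errors while keeping the iteration abortable. It assumes a runtime with a global `AbortController` (Node ≥ 15), and `processFile` is a hypothetical helper:
```js
import { asyncEach } from '@vates/async-each'

const ac = new AbortController()

try {
  await asyncEach(
    ['foo.txt', 'bar.txt', 'baz.txt'],
    async filename => {
      // processFile is a hypothetical helper, not part of this package
      await processFile(filename)
    },
    {
      concurrency: 2,
      // collect every error instead of failing fast
      stopOnError: false,
      // calling ac.abort() rejects the returned promise
      signal: ac.signal,
    }
  )
} catch (error) {
  // with stopOnError: false, failures are reported as an AggregateError;
  // an abort rejects with a plain Error, hence the fallback
  console.error(error.errors ?? error)
}
```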
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -1,35 +0,0 @@
### `asyncEach(iterable, iteratee, [opts])`
Executes `iteratee` in order for each value yielded by `iterable`.
Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.
`iterable` must be an iterable or async iterable.
`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:
- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or to wait for all calls to finish and throw an `AggregateError`, defaults to `true`
```js
import { asyncEach } from '@vates/async-each'
const contents = []
await asyncEach(
['foo.txt', 'bar.txt', 'baz.txt'],
async function (filename, i) {
contents[i] = await readFile(filename)
},
{
// reads two files at a time
concurrency: 2,
}
)
```
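To make the `this` forwarding described above concrete, here is a small sketch (not part of the generated docs; the `Logger` class is invented for the example):
```js
import { asyncEach } from '@vates/async-each'

// hypothetical class, only for the example
class Logger {
  constructor(prefix) {
    this.prefix = prefix
  }
  async log(line) {
    console.log(this.prefix, line)
  }
}

// the iteratee runs with the same `this` as asyncEach itself,
// so a plain (non-arrow) function can reach the logger via `this`
await asyncEach.call(new Logger('[files]'), ['foo.txt', 'bar.txt'], async function (name) {
  await this.log(name)
})
```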

View File

@@ -1,99 +0,0 @@
'use strict'
const noop = Function.prototype
// minimal polyfill for runtimes without a global AggregateError (Node < 15)
class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []
let running = 0
let index = 0
let onAbort
if (signal !== undefined) {
onAbort = () => {
onRejectedWrapper(new Error('asyncEach aborted'))
}
signal.addEventListener('abort', onAbort)
}
const clean = () => {
onFulfilled = onRejected = noop
if (onAbort !== undefined) {
signal.removeEventListener('abort', onAbort)
}
}
// wrap resolve and reject so that settling the promise also triggers cleanup
resolve = (resolve =>
function resolveAndClean(value) {
resolve(value)
clean()
})(resolve)
reject = (reject =>
function rejectAndClean(reason) {
reject(reason)
clean()
})(reject)
let onFulfilled = value => {
--running
next()
}
const onFulfilledWrapper = value => onFulfilled(value)
let onRejected = stopOnError
? reject
: error => {
--running
errors.push(error)
next()
}
const onRejectedWrapper = reason => onRejected(reason)
// guard against concurrent/reentrant executions of next()
let nextIsRunning = false
let next = async () => {
if (nextIsRunning) {
return
}
nextIsRunning = true
if (running < concurrency) {
const cursor = await it.next()
if (cursor.done) {
next = () => {
if (running === 0) {
if (errors.length === 0) {
resolve()
} else {
reject(new AggregateError(errors))
}
}
}
} else {
++running
try {
const result = iteratee.call(this, cursor.value, index++, iterable)
let then
if (result != null && typeof result === 'object' && typeof (then = result.then) === 'function') {
then.call(result, onFulfilledWrapper, onRejectedWrapper)
} else {
onFulfilled(result)
}
} catch (error) {
onRejected(error)
}
}
nextIsRunning = false
return next()
}
nextIsRunning = false
}
next()
})
}

View File

@@ -1,99 +0,0 @@
'use strict'
/* eslint-env jest */
const { asyncEach } = require('./')
const randomDelay = (max = 10) =>
new Promise(resolve => {
setTimeout(resolve, Math.floor(Math.random() * max + 1))
})
const rejectionOf = p =>
new Promise((resolve, reject) => {
p.then(reject, resolve)
})
describe('asyncEach', () => {
const thisArg = 'qux'
const values = ['foo', 'bar', 'baz']
Object.entries({
'sync iterable': () => values,
'async iterable': async function* () {
for (const value of values) {
await randomDelay()
yield value
}
},
}).forEach(([what, getIterable]) =>
describe('with ' + what, () => {
let iterable
beforeEach(() => {
iterable = getIterable()
})
it('works', async () => {
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee)
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
;[1, 2, 4].forEach(concurrency => {
it('respects a concurrency of ' + concurrency, async () => {
let running = 0
await asyncEach(
values,
async () => {
++running
expect(running).toBeLessThanOrEqual(concurrency)
await randomDelay()
--running
},
{ concurrency }
)
})
})
it('stops on first error when stopOnError is true', async () => {
const error = new Error()
const iteratee = jest.fn((_, i) => {
if (i === 1) {
throw error
}
})
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
it('rejects AggregateError when stopOnError is false', async () => {
const errors = []
const iteratee = jest.fn(() => {
const error = new Error()
errors.push(error)
throw error
})
const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
expect(error.errors).toEqual(errors)
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
})
it('can be interrupted with an AbortSignal', async () => {
const ac = new AbortController()
const iteratee = jest.fn((_, i) => {
if (i === 1) {
ac.abort()
}
})
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
expect(iteratee).toHaveBeenCalledTimes(2)
})
})
)
})

View File

@@ -1,34 +0,0 @@
{
"private": false,
"name": "@vates/async-each",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/async-each",
"description": "Run async fn for each item in (async) iterable",
"keywords": [
"array",
"async",
"collection",
"each",
"for",
"foreach",
"iterable",
"iterator"
],
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/async-each",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.0",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/backups": "^0.15.1",
"@xen-orchestra/fs": "^0.18.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.6.1",
"version": "0.6.0",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -3,19 +3,19 @@ const Disposable = require('promise-toolbox/Disposable.js')
const fromCallback = require('promise-toolbox/fromCallback.js')
const fromEvent = require('promise-toolbox/fromEvent.js')
const pDefer = require('promise-toolbox/defer.js')
const { dirname, join, normalize, resolve } = require('path')
const pump = require('pump')
const { basename, dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdSynthetic } = require('vhd-lib')
const { createSyntheticStream, mergeVhd, VhdFile } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isMetadataFile, isVhdFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
@@ -77,6 +77,48 @@ class RemoteAdapter {
return this._handler
}
async _deleteVhd(path) {
const handler = this._handler
const vhds = await asyncMapSettled(
await handler.list(dirname(path), {
filter: isVhdFile,
prependDir: true,
}),
async path => {
try {
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
path,
}
} catch (error) {
// Do not fail on corrupted VHDs (usually uncleaned temporary files):
// they are probably inconsequential to the backup process and should
// not fail it.
warn(`BackupNg#_deleteVhd ${path}`, { error })
}
}
)
const base = basename(path)
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
if (child === undefined) {
await handler.unlink(path)
return 0
}
try {
const childPath = child.path
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
await handler.rename(path, childPath)
return mergedDataSize
} catch (error) {
handler.unlink(path).catch(warn)
throw error
}
}
async _findPartition(devicePath, partitionId) {
const partitions = await listPartitions(devicePath)
const partition = partitions.find(_ => _.id === partitionId)
@@ -213,7 +255,7 @@ class RemoteAdapter {
const handler = this._handler
// unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => VhdAbstract.unlink(handler, _filename))
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
}
async deleteMetadataBackup(backupId) {
@@ -312,17 +354,6 @@ class RemoteAdapter {
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
}
// this function is where we plug in the storage-format logic depending on FS type / user settings
// if the file is named *.vhd => VHD file
// if the file is named *.alias.vhd => alias to a VHD
getVhdFileName(baseName) {
if (this._handler.type === 's3') {
return `${baseName}.alias.vhd` // we want an alias to a VHD directory
}
return `${baseName}.vhd`
}
async listAllVmBackups() {
const handler = this._handler
@@ -467,24 +498,6 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
let dataPath = path
if (path.endsWith('.alias.vhd')) {
await createVhdDirectoryFromStream(handler, `${dirname(path)}/data/${uuidv4()}.vhd`, input, {
concurrency: 16,
async validator() {
await input.task
return validator.apply(this, arguments)
},
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
await this.outputStream(dataPath, input, { checksum, validator })
}
}
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
await this._handler.outputStream(path, input, {
checksum,
@@ -496,52 +509,6 @@ class RemoteAdapter {
})
}
async _createSyntheticStream(handler, paths) {
let disposableVhds = []
// if it's a path: open the whole hierarchy of parents
if (typeof paths === 'string') {
let vhd,
vhdPath = paths
do {
const disposable = await openVhd(handler, vhdPath)
vhd = disposable.value
disposableVhds.push(disposable)
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== Constants.DISK_TYPES.DYNAMIC)
} else {
// only open the given list of paths
disposableVhds = paths.map(path => openVhd(handler, path))
}
// the VHDs must not be disposed of on return,
// but only when the stream is done (or has failed)
const disposables = await Disposable.all(disposableVhds)
const vhds = disposables.value
let disposed = false
const disposeOnce = async () => {
if (!disposed) {
disposed = true
try {
await disposables.dispose()
} catch (error) {
warn('_createSyntheticStream: failed to dispose VHDs', { error })
}
}
}
const synthetic = new VhdSynthetic(vhds)
await synthetic.readHeaderAndFooter()
await synthetic.readBlockAllocationTable()
const stream = await synthetic.stream()
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
stream.on('error', disposeOnce)
return stream
}
async readDeltaVmBackup(metadata) {
const handler = this._handler
const { vbds, vdis, vhds, vifs, vm } = metadata
@@ -549,7 +516,7 @@ class RemoteAdapter {
const streams = {}
await asyncMapSettled(Object.keys(vdis), async id => {
streams[`${id}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[id]))
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
})
return {

View File

@@ -1,390 +0,0 @@
/* eslint-env jest */
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const crypto = require('crypto')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
let tempDir, adapter, handler, jobId, vdiId, basePath
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
handler = getHandler({ url: `file://${tempDir}` })
await handler.sync()
adapter = new RemoteAdapter(handler)
jobId = uniqueId()
vdiId = uniqueId()
basePath = `vdis/${jobId}/${vdiId}`
await fs.mkdirp(`${tempDir}/${basePath}`)
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
await handler.forget()
})
const uniqueId = () => crypto.randomBytes(16).toString('hex')
async function generateVhd(path, opts = {}) {
let vhd
const dataPath = opts.useAlias ? path + '.data' : path
if (opts.mode === 'directory') {
await handler.mkdir(dataPath)
vhd = new VhdDirectory(handler, dataPath)
} else {
const fd = await handler.openFile(dataPath, 'wx')
vhd = new VhdFile(handler, fd)
}
vhd.header = { ...VHDHEADER, ...opts.header }
vhd.footer = { ...VHDFOOTER, ...opts.footer }
vhd.footer.uuid = Buffer.from(crypto.randomBytes(16))
if (vhd.header.parentUnicodeName) {
vhd.footer.diskType = Constants.DISK_TYPES.DIFFERENCING
} else {
vhd.footer.diskType = Constants.DISK_TYPES.DYNAMIC
}
if (opts.useAlias === true) {
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
}
await vhd.writeBlockAllocationTable()
await vhd.writeHeader()
await vhd.writeFooter()
return vhd
}
test('it removes a broken VHD', async () => {
// TODO: also test a directory and an alias
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const onLog = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm('/', { remove: true, onLog })
expect((await handler.list(basePath)).length).toEqual(0)
})
test('it removes VHDs with missing or multiple ancestors', async () => {
// one with a broken parent
await generateVhd(`${basePath}/abandonned.vhd`, {
header: {
parentUnicodeName: 'gone.vhd',
parentUid: Buffer.from(crypto.randomBytes(16)),
},
})
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
},
})
// clean
let loggued = ''
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
test('it removes backup metadata referencing a missing VHD in a delta backup', async () => {
// create a metadata file marking child and orphan as ok
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here
],
})
)
await generateVhd(`${basePath}/abandonned.vhd`)
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
},
})
let loggued = ''
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
// a missing VHD causes clean to remove all VHDs
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.vhd is not here
],
}),
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(2) // all VHDs (orphan and child) should have been deleted
})
test('it merges the delta of a non-destroyed chain', async () => {
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
// orphan is not here, it should be merged into child
],
})
)
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
},
})
// a grand child
await generateVhd(`${basePath}/grandchild.vhd`, {
header: {
parentUnicodeName: 'child.vhd',
parentUid: child.footer.uuid,
},
})
let loggued = ''
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused\n`)
loggued = ''
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [unused, merging] = loggued.split('\n')
expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(2)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
})
test('it finishes an unterminated merge', async () => {
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
],
})
)
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
},
})
// a merge-in-progress state file
await handler.writeFile(
`${basePath}/.orphan.vhd.merge.json`,
JSON.stringify({
parent: {
header: orphan.header.checksum,
},
child: {
header: child.header.checksum,
},
})
)
// an unfinished merge
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(1)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
})
// each of the VHDs can be a file, a directory, an alias to a file or an alias to a directory
// the messages and resulting files should be identical to the output with plain VHD files, which is tested independently
describe('tests multiple combinations', () => {
for (const useAlias of [true, false]) {
for (const vhdMode of ['file', 'directory']) {
test(`alias : ${useAlias}, mode: ${vhdMode}`, async () => {
// a broken VHD
const brokenVhdDataPath = `${basePath}/${useAlias ? 'broken.data' : 'broken.vhd'}`
if (vhdMode === 'directory') {
await handler.mkdir(brokenVhdDataPath)
} else {
await handler.writeFile(brokenVhdDataPath, 'notreallyavhd')
}
if (useAlias) {
await VhdAbstract.createAlias(handler, 'broken.alias.vhd', brokenVhdDataPath)
}
// a VHD not referenced in the metadata
await generateVhd(`${basePath}/nonreference.vhd`, { useAlias, mode: vhdMode })
// an abandoned delta VHD without its parent
await generateVhd(`${basePath}/abandonned.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'gone.vhd',
parentUid: crypto.randomBytes(16),
},
})
// an ancestor of a vhd present in metadata
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
mode: vhdMode,
})
const child = await generateVhd(`${basePath}/child.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUid: ancestor.footer.uuid,
},
})
// a grand child vhd in metadata
await generateVhd(`${basePath}/grandchild.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUid: child.footer.uuid,
},
})
// an older parent whose merge into clean was interrupted
const cleanAncestor = await generateVhd(`${basePath}/cleanAncestor.vhd`, {
useAlias,
mode: vhdMode,
})
// a clean vhd in metadata
const clean = await generateVhd(`${basePath}/clean.vhd`, {
useAlias,
mode: vhdMode,
header: {
parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUid: cleanAncestor.footer.uuid,
},
})
await handler.writeFile(
`${basePath}/.cleanAncestor.vhd${useAlias ? '.alias.vhd' : ''}.merge.json`,
JSON.stringify({
parent: {
header: cleanAncestor.header.checksum,
},
child: {
header: clean.header.checksum,
},
})
)
// the metadata file
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
],
})
)
await adapter.cleanVm('/', { remove: true, merge: true })
// the broken, non-referenced and abandoned VHDs should be deleted (alias and data)
// ancestor and child should be merged
// grandchild and clean VHDs should not have changed
const survivors = await handler.list(basePath)
// console.log(survivors)
if (useAlias) {
// the goal of the alias: avoid moving a full folder
expect(survivors).toContain('ancestor.vhd.data')
expect(survivors).toContain('grandchild.vhd.data')
expect(survivors).toContain('cleanAncestor.vhd.data')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(6)
} else {
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
expect(survivors).toContain('grandchild.vhd')
expect(survivors.length).toEqual(3)
}
})
}
}
})

View File

@@ -1,14 +1,13 @@
const assert = require('assert')
const sum = require('lodash/sum')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { VhdFile, mergeVhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants.js')
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
// chain is an array of VHDs from child to parent
//
@@ -66,12 +65,12 @@ async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
clearInterval(handle)
await Promise.all([
VhdAbstract.rename(handler, parent, child),
handler.rename(parent, child),
asyncMap(children.slice(0, -1), child => {
onLog(`the VHD ${child} is unused`)
if (remove) {
onLog(`deleting unused VHD ${child}`)
return VhdAbstract.unlink(handler, child)
return handler.unlink(child)
}
}),
])
@@ -138,55 +137,53 @@ exports.cleanVm = async function cleanVm(
// remove broken VHDs
await asyncMap(vhdsList.vhds, async path => {
try {
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !vhdsList.interruptedVhds.has(path) }), vhd => {
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter(!vhdsList.interruptedVhds.has(path))
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
})
vhdChildren[parent] = path
}
} catch (error) {
onLog(`error while checking the VHD with path ${path}`, { error })
if (error?.code === 'ERR_ASSERTION' && remove) {
onLog(`deleting broken ${path}`)
return VhdAbstract.unlink(handler, path)
await handler.unlink(path)
}
}
})
// @todo: add a check for alias data folders that are not referenced by a valid alias
// remove VHDs with missing ancestors
{
const deletions = []
// return true if the VHD has been deleted or is missing
const deleteIfOrphan = vhdPath => {
const parent = vhdParents[vhdPath]
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
if (parent === undefined) {
return
}
// no longer needs to be checked
delete vhdParents[vhdPath]
delete vhdParents[vhd]
deleteIfOrphan(parent)
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
vhds.delete(vhd)
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
onLog(`the parent ${parent} of the VHD ${vhd} is missing`)
if (remove) {
onLog(`deleting orphan VHD ${vhdPath}`)
deletions.push(VhdAbstract.unlink(handler, vhdPath))
onLog(`deleting orphan VHD ${vhd}`)
deletions.push(handler.unlink(vhd))
}
}
}
@@ -256,26 +253,15 @@ exports.cleanVm = async function cleanVm(
const { vhds } = metadata
return Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
})()
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
// checking the size of a VHD directory is costly
// (1 HTTP query per 1000 blocks),
// so we only compute the size if all the VHDs are VhdFiles
const shouldComputeSize = linkedVhds.every(vhd => vhd instanceof VhdFile)
if (shouldComputeSize) {
try {
await Disposable.use(Disposable.all(linkedVhds.map(vhdPath => openVhd(handler, vhdPath))), async vhds => {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
size = sum(sizes)
})
} catch (error) {
onLog(`failed to get size of ${json}`, { error })
}
}
size = await asyncMap(linkedVhds, vhd => handler.getSize(vhd)).then(sum, error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`)
if (remove) {
@@ -338,7 +324,7 @@ exports.cleanVm = async function cleanVm(
onLog(`the VHD ${vhd} is unused`)
if (remove) {
onLog(`deleting unused VHD ${vhd}`)
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
unusedVhdsDeletion.push(handler.unlink(vhd))
}
}

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.16.0",
"version": "0.15.1",
"engines": {
"node": ">=14.6"
},
@@ -20,7 +20,7 @@
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
@@ -35,12 +35,11 @@
"promise-toolbox": "^0.20.0",
"proper-lockfile": "^4.1.2",
"pump": "^3.0.0",
"uuid": "^8.3.2",
"vhd-lib": "^2.0.1",
"vhd-lib": "^1.3.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.8.4"
"@xen-orchestra/xapi": "^0.8.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -1,92 +0,0 @@
// a valid footer of a 50 GiB dynamic VHD
exports.VHDFOOTER = {
cookie: 'conectix',
features: 2,
fileFormatVersion: 65536,
dataOffset: 512,
timestamp: 0,
creatorApplication: 'caml',
creatorVersion: 1,
creatorHostOs: 0,
originalSize: 53687091200,
currentSize: 53687091200,
diskGeometry: { cylinders: 25700, heads: 16, sectorsPerTrackCylinder: 255 },
diskType: 3,
checksum: 4294962945,
uuid: Buffer.from('d8dbcad85265421e8b298d99c2eec551', 'utf-8'),
saved: '',
hidden: '',
reserved: '',
}
exports.VHDHEADER = {
cookie: 'cxsparse',
dataOffset: undefined,
tableOffset: 2048,
headerVersion: 65536,
maxTableEntries: 25600,
blockSize: 2097152,
checksum: 4294964241,
parentUuid: null,
parentTimestamp: 0,
reserved1: 0,
parentUnicodeName: '',
parentLocatorEntry: [
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
],
reserved2: '',
}

View File

@@ -3,7 +3,7 @@ const map = require('lodash/map.js')
const mapValues = require('lodash/mapValues.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
const { chainVhd, checkVhdChain, VhdFile } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { dirname } = require('path')
@@ -16,7 +16,6 @@ const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
@@ -38,13 +37,13 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await asyncMap(vhds, async path => {
try {
await checkVhdChain(handler, path)
await Disposable.use(
openVhd(handler, path),
vhd => (found = found || vhd.footer.uuid.equals(packUuid(baseUuid)))
)
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
await ignoreErrors.call(handler.unlink(path))
}
})
} catch (error) {
@@ -145,7 +144,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
// don't do delta for it
vdi.uuid
: vdi.$snapshot_of$uuid
}/${adapter.getVhdFileName(basename)}`
}/${basename}.vhd`
)
const metadataFilename = `${backupDir}/${basename}.json`
@@ -189,7 +188,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
await adapter.outputStream(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
@@ -201,11 +200,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
// set the correct UUID in the VHD
await Disposable.use(openVhd(handler, path), async vhd => {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
)
return {

View File

@@ -1,6 +1,5 @@
const openVhd = require('vhd-lib').openVhd
const Disposable = require('promise-toolbox/Disposable')
const Vhd = require('vhd-lib').VhdFile
exports.checkVhd = async function checkVhd(handler, path) {
await Disposable.use(openVhd(handler, path), () => {})
await new Vhd(handler, path).readHeaderAndFooter()
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.19.1",
"version": "0.18.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -33,7 +33,7 @@
"proper-lockfile": "^4.1.2",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",
"xo-remote-parser": "^0.8.0"
"xo-remote-parser": "^0.7.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -76,7 +76,6 @@ export default class RemoteHandlerAbstract {
const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
this.closeFile = sharedLimit(this.closeFile)
this.copy = sharedLimit(this.copy)
this.getInfo = sharedLimit(this.getInfo)
this.getSize = sharedLimit(this.getSize)
this.list = sharedLimit(this.list)
@@ -308,17 +307,6 @@ export default class RemoteHandlerAbstract {
return p
}
async copy(oldPath, newPath, { checksum = false } = {}) {
oldPath = normalizePath(oldPath)
newPath = normalizePath(newPath)
let p = timeout.call(this._copy(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._copy(checksumFile(oldPath), checksumFile(newPath))])
}
return p
}
async rmdir(dir) {
await timeout.call(this._rmdir(normalizePath(dir)).catch(ignoreEnoent), this._timeout)
}
@@ -531,9 +519,6 @@ export default class RemoteHandlerAbstract {
async _rename(oldPath, newPath) {
throw new Error('Not implemented')
}
async _copy(oldPath, newPath) {
throw new Error('Not implemented')
}
async _rmdir(dir) {
throw new Error('Not implemented')

View File

@@ -33,10 +33,6 @@ export default class LocalHandler extends RemoteHandlerAbstract {
return fs.close(fd)
}
async _copy(oldPath, newPath) {
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)

View File

@@ -1,7 +1,6 @@
import aws from '@sullux/aws-sdk'
import assert from 'assert'
import http from 'http'
import https from 'https'
import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
@@ -17,7 +16,7 @@ const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the
export default class S3Handler extends RemoteHandlerAbstract {
constructor(remote, _opts) {
super(remote)
const { allowUnauthorized, host, path, username, password, protocol, region } = parse(remote.url)
const { host, path, username, password, protocol, region } = parse(remote.url)
const params = {
accessKeyId: username,
apiVersion: '2006-03-01',
@@ -30,13 +29,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
},
}
if (protocol === 'http') {
params.httpOptions.agent = new http.Agent({ keepAlive: true })
params.httpOptions.agent = new http.Agent()
params.sslEnabled = false
} else if (protocol === 'https') {
params.httpOptions.agent = new https.Agent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
})
}
if (region !== undefined) {
params.region = region
@@ -57,27 +51,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
return { Bucket: this._bucket, Key: this._dir + file }
}
async _copy(oldPath, newPath) {
const size = await this._getSize(oldPath)
const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
try {
const parts = []
let start = 0
while (start < size) {
const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
const upload = await this._s3.uploadPartCopy(partParams)
parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
start += MAX_PART_SIZE
}
await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
} catch (e) {
await this._s3.abortMultipartUpload(multipartParams)
throw e
}
}
async _isNotEmptyDir(path) {
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
@@ -174,9 +147,25 @@ export default class S3Handler extends RemoteHandlerAbstract {
// nothing to do, directories do not exist, they are part of the files' path
}
// s3 doesn't have a rename operation, so copy + delete source
async _rename(oldPath, newPath) {
await this.copy(oldPath, newPath)
const size = await this._getSize(oldPath)
const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
try {
const parts = []
let start = 0
while (start < size) {
const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
const upload = await this._s3.uploadPartCopy(partParams)
parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
start += MAX_PART_SIZE
}
await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
} catch (e) {
await this._s3.abortMultipartUpload(multipartParams)
throw e
}
await this._s3.deleteObject(this._createParams(oldPath))
}

View File

@@ -13,7 +13,7 @@ module.exports = class Config {
const watchers = (this._watchers = new Set())
app.hooks.on('start', async () => {
app.hooks.once(
app.hooks.on(
'stop',
await watch({ appDir, appName, ignoreUnknownFormats: true }, (error, config) => {
if (error != null) {

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.15.3",
"version": "0.15.2",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -31,13 +31,13 @@
"@vates/decorate-with": "^0.1.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.0",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/backups": "^0.15.1",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.8.4",
"@xen-orchestra/xapi": "^0.8.0",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.1.0",
@@ -47,7 +47,7 @@
"golike-defer": "^0.5.1",
"http-server-plus": "^0.11.0",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
"jsonrpc-websocket-client": "^0.6.0",
"koa": "^2.5.1",
"koa-compress": "^5.0.1",
"koa-helmet": "^5.1.0",

View File

@@ -1,4 +1,3 @@
import assert from 'assert'
import fse from 'fs-extra'
import xdg from 'xdg-basedir'
import { createLogger } from '@xen-orchestra/log'
@@ -11,48 +10,33 @@ const { warn } = createLogger('xo:proxy:authentication')
const isValidToken = t => typeof t === 'string' && t.length !== 0
export default class Authentication {
#token
constructor(_, { appName, config: { authenticationToken: token } }) {
if (!isValidToken(token)) {
token = JSON.parse(execFileSync('xenstore-read', ['vm-data/xo-proxy-authenticationToken']))
constructor(app, { appName, config: { authenticationToken: token } }) {
const setToken = ({ token }) => {
assert(isValidToken(token), 'invalid authentication token: ' + token)
// save this token in the automatically handled conf file
fse.outputFileSync(
// this file must take precedence over normal user config
`${xdg.config}/${appName}/config.z-auto.json`,
JSON.stringify({ authenticationToken: token }),
{ mode: 0o600 }
)
this.#token = token
}
if (isValidToken(token)) {
this.#token = token
} else {
setToken({ token: JSON.parse(execFileSync('xenstore-read', ['vm-data/xo-proxy-authenticationToken'])) })
if (!isValidToken(token)) {
throw new Error('missing authenticationToken in configuration')
}
try {
// save this token in the automatically handled conf file
fse.outputFileSync(
// this file must take precedence over normal user config
`${xdg.config}/${appName}/config.z-auto.json`,
JSON.stringify({ authenticationToken: token }),
{ mode: 0o600 }
)
execFileSync('xenstore-rm', ['vm-data/xo-proxy-authenticationToken'])
} catch (error) {
warn('failed to remove token from XenStore', { error })
}
}
app.api.addMethod('authentication.setToken', setToken, {
description: 'change the authentication token used by this XO Proxy',
params: {
token: {
type: 'string',
minLength: 1,
},
},
})
this._token = token
}
async findProfile(credentials) {
if (credentials?.authenticationToken === this.#token) {
if (credentials?.authenticationToken === this._token) {
return new Profile()
}
}

View File

@@ -44,7 +44,7 @@
"pw": "^0.0.4",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-lib": "^0.10.1",
"xo-vmdk-to-vhd": "^2.0.0"
},
"devDependencies": {

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "0.8.4",
"version": "0.8.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {

View File

@@ -98,16 +98,18 @@ function removeWatcher(predicate, cb) {
class Xapi extends Base {
constructor({
callRetryWhenTooManyPendingTasks = { delay: 5e3, tries: 10 },
callRetryWhenTooManyPendingTasks,
ignoreNobakVdis,
maxUncoalescedVdis,
vdiDestroyRetryWhenInUse = { delay: 5e3, tries: 10 },
vdiDestroyRetryWhenInUse,
...opts
}) {
assert.notStrictEqual(ignoreNobakVdis, undefined)
super(opts)
this._callRetryWhenTooManyPendingTasks = {
delay: 5e3,
tries: 10,
...callRetryWhenTooManyPendingTasks,
onRetry,
when: { code: 'TOO_MANY_PENDING_TASKS' },
@@ -115,6 +117,8 @@ class Xapi extends Base {
this._ignoreNobakVdis = ignoreNobakVdis
this._maxUncoalescedVdis = maxUncoalescedVdis
this._vdiDestroyRetryWhenInUse = {
delay: 5e3,
retries: 10,
...vdiDestroyRetryWhenInUse,
onRetry,
when: { code: 'VDI_IN_USE' },

View File

@@ -99,7 +99,6 @@ module.exports = class Vm {
// should coalesce
const children = childrenMap[vdi.uuid]
if (
children !== undefined && // unused unmanaged VDI, will be GC-ed
children.length === 1 &&
!children[0].managed && // some SRs do not coalesce the leaf
tolerance-- <= 0

View File

@@ -1,41 +1,8 @@
# ChangeLog
## **5.65.0** (2021-11-30)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Highlights
- [VM] Ability to export a snapshot's memory (PR [#6015](https://github.com/vatesfr/xen-orchestra/pull/6015))
- [Cloud config] Ability to create a network cloud config template and reuse it in the VM creation [#5931](https://github.com/vatesfr/xen-orchestra/issues/5931) (PR [#5979](https://github.com/vatesfr/xen-orchestra/pull/5979))
- [Backup/logs] Identify XAPI errors (PR [#6001](https://github.com/vatesfr/xen-orchestra/pull/6001))
- [lite] Highlight selected VM (PR [#5939](https://github.com/vatesfr/xen-orchestra/pull/5939))
### Enhancements
- [S3] Ability to authorize self signed certificates for S3 remote (PR [#5961](https://github.com/vatesfr/xen-orchestra/pull/5961))
### Bug fixes
- [Import/VM] Fix the import of OVA files (PR [#5976](https://github.com/vatesfr/xen-orchestra/pull/5976))
### Packages to release
- @vates/async-each 0.1.0
- xo-remote-parser 0.8.4
- @xen-orchestra/fs 0.19.0
- @xen-orchestra/xapi patch
- vhd-lib 2.0.1
- @xen-orchestra/backups 0.16.0
- xo-lib 0.11.1
- @xen-orchestra/proxy 0.15.3
- xo-server 5.84.1
- vhd-cli 0.6.0
- xo-web 5.90.0
## **5.64.0** (2021-10-29)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
# ChangeLog
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
## Highlights
@@ -78,6 +45,8 @@
## **5.63.0** (2021-09-30)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Highlights
- [Backup] Go back to previous page instead of going to the overview after editing a job: keeps current filters and page (PR [#5913](https://github.com/vatesfr/xen-orchestra/pull/5913))

View File

@@ -11,9 +11,7 @@
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Delta Backup Restoration] Fix assertion error [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/16)
- [Delta Backup Restoration] `TypeError: this disposable has already been disposed` [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/20)
- [Delta Backup Restoration] Fix error `ENOENT: no such file '/xo-vm-backups/../parentLocatorEntryN'` with VHDs having an empty parent locator
- [Import/VM] Fix the import of OVA files (PR [#5976](https://github.com/vatesfr/xen-orchestra/pull/5976))
### Packages to release
@@ -32,8 +30,7 @@
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- xo-vmdk-to-vhd patch
- vhd-lib patch
- @xen-orchestra/backups patch
- @xen-orchestra/proxy patch
- @xen-orchestra/fs minor
- vhd-lib minor
- xo-server patch
- vhd-cli minor

View File

@@ -26,12 +26,6 @@ Each backup job execution is identified by a `runId`. You can find this `runId`
![](./assets/log-runId.png)
## Exclude disks
During a backup job, you can avoid saving all of a VM's disks. Doing so is trivial: just edit the VM disk name and add `[NOBAK]` before the current name, e.g. `data-disk` becomes `[NOBAK] data-disk` (with or without a space, it doesn't matter).
Disks marked with `[NOBAK]` will now be ignored in all subsequent backups.
## Schedule
:::tip

View File

@@ -43,6 +43,12 @@ Just go into your "Backup" view, and select Delta Backup. Then, it's the same as
Unlike other types of backup jobs, which delete the associated snapshot once the job is done and it has been exported, delta backups always keep a snapshot of every VM in the backup job and use it for the delta. Do not delete these snapshots!
## Exclude disks
During a delta backup job, you can avoid saving all of a VM's disks. Doing so is trivial: just edit the VM disk name and add `[NOBAK]` before the current name, e.g. `data-disk` becomes `[NOBAK] data-disk` (with or without a space, it doesn't matter).
Disks marked with `[NOBAK]` will now be ignored in all subsequent backups.
## Delta backup initial seed
If you don't want to do an initial full directly toward the destination, you can create a local delta backup first, then transfer the files to your destination.

View File

@@ -19,7 +19,7 @@
"handlebars": "^4.7.6",
"husky": "^4.2.5",
"jest": "^27.3.1",
"lint-staged": "^12.0.3",
"lint-staged": "^11.1.2",
"lodash": "^4.17.4",
"prettier": "^2.0.5",
"promise-toolbox": "^0.20.0",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-cli",
"version": "0.6.0",
"version": "0.5.0",
"license": "ISC",
"description": "Tools to read/create and merge VHD files",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -24,12 +24,11 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/fs": "^0.18.0",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"human-format": "^0.11.0",
"vhd-lib": "^2.0.1"
"vhd-lib": "^1.3.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,34 +1,6 @@
import { Constants, VhdFile } from 'vhd-lib'
import { VhdFile } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
import humanFormat from 'human-format'
import invert from 'lodash/invert.js'
const { PLATFORMS } = Constants
const DISK_TYPES_MAP = invert(Constants.DISK_TYPES)
const PLATFORMS_MAP = invert(PLATFORMS)
const MAPPERS = {
bytes: humanFormat.bytes,
date: _ => (_ !== 0 ? new Date(_) : 0),
diskType: _ => DISK_TYPES_MAP[_],
platform: _ => PLATFORMS_MAP[_],
}
function mapProperties(object, mapping) {
const result = { ...object }
for (const prop of Object.keys(mapping)) {
const value = object[prop]
if (value !== undefined) {
let mapper = mapping[prop]
if (typeof mapper === 'string') {
mapper = MAPPERS[mapper]
}
result[prop] = mapper(value)
}
}
return result
}
export default async args => {
const vhd = new VhdFile(getHandler({ url: 'file:///' }), resolve(args[0]))
@@ -40,26 +12,6 @@ export default async args => {
await vhd.readHeaderAndFooter(false)
}
console.log(
mapProperties(vhd.footer, {
currentSize: 'bytes',
diskType: 'diskType',
originalSize: 'bytes',
timestamp: 'date',
})
)
console.log(
mapProperties(vhd.header, {
blockSize: 'bytes',
parentTimestamp: 'date',
parentLocatorEntry: _ =>
_.filter(_ => _.platformCode !== PLATFORMS.NONE) // hide empty
.map(_ =>
mapProperties(_, {
platformCode: 'platform',
})
),
})
)
console.log(vhd.header)
console.log(vhd.footer)
}

View File

@@ -1,18 +1,13 @@
import { openVhd } from 'vhd-lib'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { createContentStream } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
import { writeStream } from '../_utils'
import { Disposable } from 'promise-toolbox'
export default async args => {
if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: ${this.command} <input VHD> [<output raw>]`
}
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
const vhd = yield openVhd(handler, resolve(args[0]))
await vhd.readBlockAllocationTable() // rawContent() reads blocks through the BAT
await writeStream(vhd.rawContent(), args[1])
})
await writeStream(createContentStream(getHandler({ url: 'file:///' }), resolve(args[0])), args[1])
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "2.0.1",
"version": "1.3.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -16,7 +16,6 @@
"node": ">=10"
},
"dependencies": {
"@vates/async-each": "^0.1.0",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"async-iterator-to-stream": "^1.0.2",
@@ -31,7 +30,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/fs": "^0.18.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^5.0.0",

View File

@@ -7,10 +7,8 @@ import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { openVhd } from '../index'
import { checkFile, createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from '../tests/utils'
import { createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from '../tests/utils'
import { VhdAbstract } from './VhdAbstract'
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from '../_constants'
import { unpackHeader, unpackFooter } from './_utils'
let tempDir
@@ -24,15 +22,6 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
const streamToBuffer = stream => {
let buffer = Buffer.alloc(0)
return new Promise((resolve, reject) => {
stream.on('data', data => (buffer = Buffer.concat([buffer, data])))
stream.on('end', () => resolve(buffer))
})
}
test('It creates an alias', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' + tempDir })
@@ -66,9 +55,9 @@ test('It creates an alias', async () => {
test('alias must have *.alias.vhd extension', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
const aliasPath = `${tempDir}/invalidalias.vhd`
const targetPath = `${tempDir}/targets.vhd`
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const aliasPath = 'invalidalias.vhd'
const targetPath = 'targets.vhd'
expect(async () => await VhdAbstract.createAlias(handler, aliasPath, targetPath)).rejects.toThrow()
expect(await fs.exists(aliasPath)).toEqual(false)
@@ -77,9 +66,9 @@ test('alias must have *.alias.vhd extension', async () => {
test('alias must not be chained', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
const aliasPath = `${tempDir}/valid.alias.vhd`
const targetPath = `${tempDir}/an.other.valid.alias.vhd`
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const aliasPath = 'valid.alias.vhd'
const targetPath = 'an.other.valid.alias.vhd'
expect(async () => await VhdAbstract.createAlias(handler, aliasPath, targetPath)).rejects.toThrow()
expect(await fs.exists(aliasPath)).toEqual(false)
})
@@ -89,19 +78,17 @@ test('It renames and unlinks a VHDFile', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
await convertFromRawToVhd(rawFileName, `${tempDir}/randomfile.vhd`)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
const { size } = await fs.stat(vhdFileName)
const targetFileName = `${tempDir}/renamed.vhd`
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const { size } = await fs.stat(`${tempDir}/randomfile.vhd`)
await VhdAbstract.rename(handler, vhdFileName, targetFileName)
expect(await fs.exists(vhdFileName)).toEqual(false)
const { size: renamedSize } = await fs.stat(targetFileName)
await VhdAbstract.rename(handler, 'randomfile.vhd', 'renamed.vhd')
expect(await fs.exists(`${tempDir}/randomfile.vhd`)).toEqual(false)
const { size: renamedSize } = await fs.stat(`${tempDir}/renamed.vhd`)
expect(size).toEqual(renamedSize)
await VhdAbstract.unlink(handler, targetFileName)
expect(await fs.exists(targetFileName)).toEqual(false)
await VhdAbstract.unlink(handler, 'renamed.vhd')
expect(await fs.exists(`${tempDir}/renamed.vhd`)).toEqual(false)
})
})
@@ -111,21 +98,15 @@ test('It rename and unlink a VhdDirectory', async () => {
await createRandomVhdDirectory(vhdDirectory, initalSize)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
const vhd = yield openVhd(handler, vhdDirectory)
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'randomfile.dir')
expect(vhd.header.cookie).toEqual('cxsparse')
expect(vhd.footer.cookie).toEqual('conectix')
const targetFileName = `${tempDir}/renamed.vhd`
// it should clean an existing directory
await fs.mkdir(targetFileName)
await fs.writeFile(`${targetFileName}/dummy`, 'I exists')
await VhdAbstract.rename(handler, vhdDirectory, targetFileName)
expect(await fs.exists(vhdDirectory)).toEqual(false)
expect(await fs.exists(targetFileName)).toEqual(true)
expect(await fs.exists(`${targetFileName}/dummy`)).toEqual(false)
await VhdAbstract.unlink(handler, targetFileName)
expect(await fs.exists(targetFileName)).toEqual(false)
await VhdAbstract.rename(handler, 'randomfile.dir', 'renamed.vhd')
expect(await fs.exists(`${tempDir}/randomfile.dir`)).toEqual(false)
await VhdAbstract.unlink(handler, `renamed.vhd`)
expect(await fs.exists(`${tempDir}/renamed.vhd`)).toEqual(false)
})
})
@@ -139,131 +120,19 @@ test('It create , rename and unlink alias', async () => {
const aliasFileNameRenamed = `${tempDir}/aliasFileNameRenamed.alias.vhd`
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
await VhdAbstract.createAlias(handler, aliasFileName, vhdFileName)
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
await VhdAbstract.createAlias(handler, 'aliasFileName.alias.vhd', 'randomfile.vhd')
expect(await fs.exists(aliasFileName)).toEqual(true)
expect(await fs.exists(vhdFileName)).toEqual(true)
await VhdAbstract.rename(handler, aliasFileName, aliasFileNameRenamed)
await VhdAbstract.rename(handler, 'aliasFileName.alias.vhd', 'aliasFileNameRenamed.alias.vhd')
expect(await fs.exists(aliasFileName)).toEqual(false)
expect(await fs.exists(vhdFileName)).toEqual(true)
expect(await fs.exists(aliasFileNameRenamed)).toEqual(true)
await VhdAbstract.unlink(handler, aliasFileNameRenamed)
await VhdAbstract.unlink(handler, 'aliasFileNameRenamed.alias.vhd')
expect(await fs.exists(aliasFileName)).toEqual(false)
expect(await fs.exists(vhdFileName)).toEqual(false)
expect(await fs.exists(aliasFileNameRenamed)).toEqual(false)
})
})
test('it can create a vhd stream', async () => {
const initialNbBlocks = 3
const initalSize = initialNbBlocks * 2
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/vhd.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const bat = Buffer.alloc(512)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' + tempDir })
const vhd = yield openVhd(handler, 'vhd.vhd')
// mark first block as unused
await handler.read('vhd.vhd', bat, vhd.header.tableOffset)
bat.writeUInt32BE(BLOCK_UNUSED, 0)
await handler.write('vhd.vhd', bat, vhd.header.tableOffset)
// read our modified bat
await vhd.readBlockAllocationTable()
const stream = vhd.stream()
// read all the stream into a buffer
const buffer = await streamToBuffer(stream)
const length = buffer.length
const bufFooter = buffer.slice(0, FOOTER_SIZE)
// footer is still valid
expect(() => unpackFooter(bufFooter)).not.toThrow()
const footer = unpackFooter(bufFooter)
// header is still valid
const bufHeader = buffer.slice(FOOTER_SIZE, HEADER_SIZE + FOOTER_SIZE)
expect(() => unpackHeader(bufHeader, footer)).not.toThrow()
// 1 deleted block should be in output
const start = FOOTER_SIZE + HEADER_SIZE + vhd.batSize
expect(length).toEqual(start + (initialNbBlocks - 1) * vhd.fullBlockSize + FOOTER_SIZE)
expect(stream.length).toEqual(buffer.length)
// blocks
const blockBuf = Buffer.alloc(vhd.sectorsPerBlock * SECTOR_SIZE, 0)
for (let i = 1; i < initialNbBlocks; i++) {
const blockDataStart = start + (i - 1) * vhd.fullBlockSize + 512 /* block bitmap */
const blockDataEnd = blockDataStart + vhd.sectorsPerBlock * SECTOR_SIZE
const content = buffer.slice(blockDataStart, blockDataEnd)
await handler.read('randomfile', blockBuf, i * vhd.sectorsPerBlock * SECTOR_SIZE)
expect(content).toEqual(blockBuf)
}
// footer
const endFooter = buffer.slice(length - FOOTER_SIZE)
expect(bufFooter).toEqual(endFooter)
await handler.writeFile('out.vhd', buffer)
// check that the vhd is still valid
await checkFile(`${tempDir}/out.vhd`)
})
})
it('can stream content', async () => {
const initalSizeMb = 5 // 2 blocks and a half
const initialNbBlocks = Math.ceil(initalSizeMb / 2)
const initialByteSize = initalSizeMb * 1024 * 1024
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSizeMb)
const vhdFileName = `${tempDir}/vhd.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const bat = Buffer.alloc(512)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' + tempDir })
const vhd = yield openVhd(handler, 'vhd.vhd')
// mark first block as unused
await handler.read('vhd.vhd', bat, vhd.header.tableOffset)
bat.writeUInt32BE(BLOCK_UNUSED, 0)
await handler.write('vhd.vhd', bat, vhd.header.tableOffset)
// read our modified block allocation table
await vhd.readBlockAllocationTable()
const stream = vhd.rawContent()
const buffer = await streamToBuffer(stream)
// qemu can modify the size to align it to the geometry
// check that data didn't change
const blockDataLength = vhd.sectorsPerBlock * SECTOR_SIZE
// first block should be empty
const EMPTY = Buffer.alloc(blockDataLength, 0)
const firstBlock = buffer.slice(0, blockDataLength)
// using buffer1 toEquals buffer2 makes jest crash trying to stringify it on failure
expect(firstBlock.equals(EMPTY)).toEqual(true)
let remainingLength = initialByteSize - blockDataLength // already checked the first block
for (let i = 1; i < initialNbBlocks; i++) {
// last block will be truncated
const blockSize = Math.min(blockDataLength, remainingLength - blockDataLength)
const blockDataStart = i * blockDataLength // the first block has been deleted
const blockDataEnd = blockDataStart + blockSize
const content = buffer.slice(blockDataStart, blockDataEnd)
const blockBuf = Buffer.alloc(blockSize, 0)
await handler.read('randomfile', blockBuf, i * blockDataLength)
expect(content.equals(blockBuf)).toEqual(true)
remainingLength -= blockSize
}
})
})

View File

@@ -1,38 +1,29 @@
import { computeBatSize, computeSectorOfBitmap, computeSectorsPerBlock, sectorsToBytes } from './_utils'
import { PLATFORMS, SECTOR_SIZE, PARENT_LOCATOR_ENTRIES, FOOTER_SIZE, HEADER_SIZE, BLOCK_UNUSED } from '../_constants'
import { computeBatSize, sectorsRoundUpNoZero, sectorsToBytes } from './_utils'
import { PLATFORM_NONE, SECTOR_SIZE, PLATFORM_W2KU, PARENT_LOCATOR_ENTRIES } from '../_constants'
import { resolveAlias, isVhdAlias } from '../_resolveAlias'
import assert from 'assert'
import path from 'path'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { checksumStruct, fuFooter, fuHeader } from '../_structs'
import { isVhdAlias, resolveAlias } from '../_resolveAlias'
export class VhdAbstract {
#header
bitmapSize
footer
get bitmapSize() {
return sectorsToBytes(this.sectorsOfBitmap)
}
get fullBlockSize() {
return sectorsToBytes(this.sectorsOfBitmap + this.sectorsPerBlock)
}
fullBlockSize
sectorsOfBitmap
sectorsPerBlock
get header() {
assert.notStrictEqual(this.#header, undefined, `header must be read before it's used`)
return this.#header
}
get sectorsOfBitmap() {
return computeSectorOfBitmap(this.header.blockSize)
}
get sectorsPerBlock() {
return computeSectorsPerBlock(this.header.blockSize)
}
set header(header) {
this.#header = header
this.sectorsPerBlock = header.blockSize / SECTOR_SIZE
this.sectorsOfBitmap = sectorsRoundUpNoZero(this.sectorsPerBlock >> 3)
this.fullBlockSize = sectorsToBytes(this.sectorsOfBitmap + this.sectorsPerBlock)
this.bitmapSize = sectorsToBytes(this.sectorsOfBitmap)
}
/**
@@ -92,10 +83,8 @@ export class VhdAbstract {
*
* @returns {number} the merged data size
*/
async coalesceBlock(child, blockId) {
const block = await child.readBlock(blockId)
await this.writeEntireBlock(block)
return block.data.length
coalesceBlock(child, blockId) {
throw new Error(`coalescing the block ${blockId} from ${child} is not implemented`)
}
/**
@@ -128,7 +117,7 @@ export class VhdAbstract {
return computeBatSize(this.header.maxTableEntries)
}
async writeParentLocator({ id, platformCode = PLATFORMS.NONE, data = Buffer.alloc(0) }) {
async writeParentLocator({ id, platformCode = PLATFORM_NONE, data = Buffer.alloc(0) }) {
assert(id >= 0, 'parent Locator id must be a positive number')
assert(id < PARENT_LOCATOR_ENTRIES, `parent Locator id must be less than ${PARENT_LOCATOR_ENTRIES}`)
@@ -157,14 +146,14 @@ export class VhdAbstract {
async setUniqueParentLocator(fileNameString) {
await this.writeParentLocator({
id: 0,
platformCode: PLATFORMS.W2KU,
platformCode: PLATFORM_W2KU,
data: Buffer.from(fileNameString, 'utf16le'),
})
for (let i = 1; i < PARENT_LOCATOR_ENTRIES; i++) {
await this.writeParentLocator({
id: i,
platformCode: PLATFORMS.NONE,
platformCode: PLATFORM_NONE,
data: Buffer.alloc(0),
})
}
@@ -180,10 +169,6 @@ export class VhdAbstract {
}
static async rename(handler, sourcePath, targetPath) {
try {
// delete target if it already exists
await VhdAbstract.unlink(handler, targetPath)
} catch (e) {}
await handler.rename(sourcePath, targetPath)
}
@@ -219,97 +204,4 @@ export class VhdAbstract {
const relativePathToTarget = path.relative(aliasDir, path.resolve('/', targetPath))
await handler.writeFile(aliasPath, relativePathToTarget)
}
stream() {
const { footer, batSize } = this
const { ...header } = this.header // copy since we don't want to modify the current header
const rawFooter = fuFooter.pack(footer)
checksumStruct(rawFooter, fuFooter)
// compute parent locator place and size
// update them in header
// update checksum in header
let offset = FOOTER_SIZE + HEADER_SIZE + batSize
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const { ...entry } = header.parentLocatorEntry[i]
if (entry.platformDataSpace > 0) {
entry.platformDataOffset = offset
offset += entry.platformDataSpace
}
header.parentLocatorEntry[i] = entry
}
const rawHeader = fuHeader.pack(header)
checksumStruct(rawHeader, fuHeader)
assert.strictEqual(offset % SECTOR_SIZE, 0)
const bat = Buffer.allocUnsafe(batSize)
let offsetSector = offset / SECTOR_SIZE
const blockSizeInSectors = this.fullBlockSize / SECTOR_SIZE
let fileSize = offsetSector * SECTOR_SIZE + FOOTER_SIZE /* the footer at the end */
// compute BAT; blocks start after the parent locator entries
for (let i = 0; i < header.maxTableEntries; i++) {
if (this.containsBlock(i)) {
bat.writeUInt32BE(offsetSector, i * 4)
offsetSector += blockSizeInSectors
fileSize += this.fullBlockSize
} else {
bat.writeUInt32BE(BLOCK_UNUSED, i * 4)
}
}
const self = this
async function* iterator() {
yield rawFooter
yield rawHeader
yield bat
// yield parent locator entries
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
if (header.parentLocatorEntry[i].platformDataSpace > 0) {
const parentLocator = await self.readParentLocator(i)
// @ todo pad to platformDataSpace
yield parentLocator.data
}
}
// yield all blocks
// since contains() can be costly for synthetic vhd, use the computed bat
for (let i = 0; i < header.maxTableEntries; i++) {
if (bat.readUInt32BE(i * 4) !== BLOCK_UNUSED) {
const block = await self.readBlock(i)
yield block.buffer
}
}
// yield footer again
yield rawFooter
}
const stream = asyncIteratorToStream(iterator())
stream.length = fileSize
return stream
}
rawContent() {
const { header, footer } = this
const { blockSize } = header
const self = this
async function* iterator() {
const nBlocks = header.maxTableEntries
let remainingSize = footer.currentSize
const EMPTY = Buffer.alloc(blockSize, 0)
for (let blockId = 0; blockId < nBlocks; ++blockId) {
let buffer = self.containsBlock(blockId) ? (await self.readBlock(blockId)).data : EMPTY
// the last block can be truncated since raw size is not a multiple of blockSize
buffer = remainingSize < blockSize ? buffer.slice(0, remainingSize) : buffer
remainingSize -= blockSize
yield buffer
}
}
const stream = asyncIteratorToStream(iterator())
stream.length = footer.currentSize
return stream
}
}
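A back-of-the-envelope sketch of the layout produced by `stream()` (footer, header, BAT, allocated blocks, trailing footer), assuming no parent locator entry carries data; when locator data exists it is inserted between the BAT and the blocks, as the code above shows:

// expected length of vhd.stream() when no parent locator entry is allocated
import { Constants } from 'vhd-lib'
const { FOOTER_SIZE, HEADER_SIZE } = Constants

function expectedStreamLength(vhd, nAllocatedBlocks) {
  return FOOTER_SIZE + HEADER_SIZE + vhd.batSize + nAllocatedBlocks * vhd.fullBlockSize + FOOTER_SIZE
}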

View File

@@ -1,67 +0,0 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { openVhd } from '../openVhd'
import { createRandomFile, convertFromRawToVhd, convertToVhdDirectory } from '../tests/utils'
let tempDir = null
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('Can coalesce block', async () => {
const initalSize = 4
const parentrawFileName = `${tempDir}/randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const parentDirectoryName = `${tempDir}/parent.dir.vhd`
await createRandomFile(parentrawFileName, initalSize)
await convertFromRawToVhd(parentrawFileName, parentFileName)
await convertToVhdDirectory(parentrawFileName, parentFileName, parentDirectoryName)
const childrawFileName = `${tempDir}/randomfile`
const childFileName = `${tempDir}/childFile.vhd`
await createRandomFile(childrawFileName, initalSize)
await convertFromRawToVhd(childrawFileName, childFileName)
const childRawDirectoryName = `${tempDir}/randomFile2.vhd`
const childDirectoryFileName = `${tempDir}/childDirFile.vhd`
const childDirectoryName = `${tempDir}/childDir.vhd`
await createRandomFile(childRawDirectoryName, initalSize)
await convertFromRawToVhd(childRawDirectoryName, childDirectoryFileName)
await convertToVhdDirectory(childRawDirectoryName, childDirectoryFileName, childDirectoryName)
await Disposable.use(async function* () {
const handler = getHandler({ url: 'file://' })
const parentVhd = yield openVhd(handler, parentDirectoryName, { flags: 'w' })
await parentVhd.readBlockAllocationTable()
const childFileVhd = yield openVhd(handler, childFileName)
await childFileVhd.readBlockAllocationTable()
const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
await childDirectoryVhd.readBlockAllocationTable()
await parentVhd.coalesceBlock(childFileVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
let parentBlockData = (await parentVhd.readBlock(0)).data
let childBlockData = (await childFileVhd.readBlock(0)).data
expect(parentBlockData.equals(childBlockData)).toEqual(true)
await parentVhd.coalesceBlock(childDirectoryVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
parentBlockData = (await parentVhd.readBlock(0)).data
childBlockData = (await childDirectoryVhd.readBlock(0)).data
expect(parentBlockData).toEqual(childBlockData)
})
})
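The tests above lean on promise-toolbox's `Disposable.use` with an async generator: each yielded disposable (a synced handler, an opened VHD) is unwrapped to its value and released in reverse order when the generator returns or throws. A minimal sketch of the pattern, with an illustrative file name:

await Disposable.use(async function* () {
  const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
  const vhd = yield openVhd(handler, 'disk.vhd')
  await vhd.readBlockAllocationTable()
  // handler and vhd are both disposed here, even if an error was thrown
})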

View File

@@ -1,4 +1,4 @@
import { unpackHeader, unpackFooter, sectorsToBytes } from './_utils'
import { buildHeader, buildFooter } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { test, set as setBitmap } from '../_bitmap'
@@ -39,8 +39,8 @@ export class VhdDirectory extends VhdAbstract {
this.#uncheckedBlockTable = blockTable
}
static async open(handler, path, { flags = 'r+' } = {}) {
const vhd = new VhdDirectory(handler, path, { flags })
static async open(handler, path) {
const vhd = new VhdDirectory(handler, path)
// opening a file for reading does not trigger EISDIR as long as we don't really read from it:
// https://man7.org/linux/man-pages/man2/open.2.html
@@ -54,20 +54,19 @@ export class VhdDirectory extends VhdAbstract {
}
}
static async create(handler, path, { flags = 'wx+' } = {}) {
static async create(handler, path) {
await handler.mkdir(path)
const vhd = new VhdDirectory(handler, path, { flags })
const vhd = new VhdDirectory(handler, path)
return {
dispose: () => {},
value: vhd,
}
}
constructor(handler, path, opts) {
constructor(handler, path) {
super()
this._handler = handler
this._path = path
this._opts = opts
}
async readBlockAllocationTable() {
@@ -79,13 +78,13 @@ export class VhdDirectory extends VhdAbstract {
return test(this.#blockTable, blockId)
}
_getChunkPath(partName) {
getChunkPath(partName) {
return this._path + '/' + partName
}
async _readChunk(partName) {
// here we can implement compression and / or crypto
const buffer = await this._handler.readFile(this._getChunkPath(partName))
const buffer = await this._handler.readFile(this.getChunkPath(partName))
return {
buffer: Buffer.from(buffer),
@@ -93,14 +92,20 @@ export class VhdDirectory extends VhdAbstract {
}
async _writeChunk(partName, buffer) {
assert.notStrictEqual(
this._opts?.flags,
'r',
`Can't write a chunk ${partName} in ${this._path} with read permission`
)
assert(Buffer.isBuffer(buffer))
// here we can implement compression and / or crypto
return this._handler.outputFile(this._getChunkPath(partName), buffer, this._opts)
// chunks can be in subdirectories: create directories if necessary
const pathParts = partName.split('/')
let currentPath = this._path
// the last one is the file name
for (let i = 0; i < pathParts.length - 1; i++) {
currentPath += '/' + pathParts[i]
await this._handler.mkdir(currentPath)
}
return this._handler.writeFile(this.getChunkPath(partName), buffer)
}
// put block in subdirectories to limit impact when doing directory listing
@@ -113,8 +118,8 @@ export class VhdDirectory extends VhdAbstract {
async readHeaderAndFooter() {
const { buffer: bufHeader } = await this._readChunk('header')
const { buffer: bufFooter } = await this._readChunk('footer')
const footer = unpackFooter(bufFooter)
const header = unpackHeader(bufHeader, footer)
const footer = buildFooter(bufFooter)
const header = buildHeader(bufHeader, footer)
this.footer = footer
this.header = header
@@ -162,18 +167,11 @@ export class VhdDirectory extends VhdAbstract {
return this._writeChunk('bat', this.#blockTable)
}
// only works if data are in the same handler
// only works if data are in the same bucket
// and if the full block is modified in the child (which is the case with xcp)
async coalesceBlock(child, blockId) {
if (!(child instanceof VhdDirectory) || this._handler !== child._handler) {
return super.coalesceBlock(child, blockId)
}
await this._handler.copy(
child._getChunkPath(child._getBlockPath(blockId)),
this._getChunkPath(this._getBlockPath(blockId))
)
return sectorsToBytes(this.sectorsPerBlock)
coalesceBlock(child, blockId) {
this._handler.copy(child.getChunkPath(blockId), this.getChunkPath(blockId))
}
async writeEntireBlock(block) {
@@ -182,14 +180,7 @@ export class VhdDirectory extends VhdAbstract {
}
async _readParentLocatorData(id) {
try {
return (await this._readChunk('parentLocatorEntry' + id)).buffer
} catch (e) {
if (e.code === 'ENOENT') {
return Buffer.alloc(0)
}
throw e
}
return (await this._readChunk('parentLocatorEntry' + id)).buffer
}
async _writeParentLocatorData(id, data) {

View File

@@ -6,20 +6,13 @@ import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { pFromCallback } from 'promise-toolbox'
import { randomBytes } from 'crypto'
import { VhdFile } from './VhdFile'
import { openVhd } from '../openVhd'
import { SECTOR_SIZE } from '../_constants'
import {
checkFile,
createRandomFile,
convertFromRawToVhd,
convertToVhdDirectory,
recoverRawContent,
} from '../tests/utils'
import { checkFile, createRandomFile, convertFromRawToVhd, recoverRawContent } from '../tests/utils'
let tempDir = null
@@ -33,29 +26,6 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('respect the checkSecondFooter flag', async () => {
const initalSize = 0
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: `file://${tempDir}` })
const size = await handler.getSize('randomfile.vhd')
const fd = await handler.openFile('randomfile.vhd', 'r+')
const buffer = Buffer.alloc(512, 0)
// add a fake footer at the end
handler.write(fd, buffer, size)
await handler.closeFile(fd)
// not using openVhd to be able to call readHeaderAndFooter separately
const vhd = new VhdFile(handler, 'randomfile.vhd')
await expect(async () => await vhd.readHeaderAndFooter()).rejects.toThrow()
await expect(async () => await vhd.readHeaderAndFooter(true)).rejects.toThrow()
await expect(await vhd.readHeaderAndFooter(false)).toEqual(undefined)
})
test('blocks can be moved', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
@@ -88,7 +58,6 @@ test('the BAT MSB is not used for sign', async () => {
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
await vhd.writeFooter()
// we recover the data manually for speed reasons.
// fs.write() with offset is way faster than qemu-img when there is a 1.5 TB
@@ -193,45 +162,3 @@ test('BAT can be extended and blocks moved', async () => {
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})
test('Can coalesce block', async () => {
const initalSize = 4
const parentrawFileName = `${tempDir}/randomfile`
const parentFileName = `${tempDir}/parent.vhd`
await createRandomFile(parentrawFileName, initalSize)
await convertFromRawToVhd(parentrawFileName, parentFileName)
const childrawFileName = `${tempDir}/randomfile`
const childFileName = `${tempDir}/childFile.vhd`
await createRandomFile(childrawFileName, initalSize)
await convertFromRawToVhd(childrawFileName, childFileName)
const childRawDirectoryName = `${tempDir}/randomFile2.vhd`
const childDirectoryFileName = `${tempDir}/childDirFile.vhd`
const childDirectoryName = `${tempDir}/childDir.vhd`
await createRandomFile(childRawDirectoryName, initalSize)
await convertFromRawToVhd(childRawDirectoryName, childDirectoryFileName)
await convertToVhdDirectory(childRawDirectoryName, childDirectoryFileName, childDirectoryName)
await Disposable.use(async function* () {
const handler = getHandler({ url: 'file://' })
const parentVhd = yield openVhd(handler, parentFileName, { flags: 'r+' })
await parentVhd.readBlockAllocationTable()
const childFileVhd = yield openVhd(handler, childFileName)
await childFileVhd.readBlockAllocationTable()
const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
await childDirectoryVhd.readBlockAllocationTable()
await parentVhd.coalesceBlock(childFileVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
let parentBlockData = (await parentVhd.readBlock(0)).data
let childBlockData = (await childFileVhd.readBlock(0)).data
expect(parentBlockData).toEqual(childBlockData)
await parentVhd.coalesceBlock(childDirectoryVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
parentBlockData = (await parentVhd.readBlock(0)).data
childBlockData = (await childDirectoryVhd.readBlock(0)).data
expect(parentBlockData).toEqual(childBlockData)
})
})

View File

@@ -1,8 +1,15 @@
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, PLATFORMS, SECTOR_SIZE, PARENT_LOCATOR_ENTRIES } from '../_constants'
import { computeBatSize, sectorsToBytes, unpackHeader, unpackFooter, BUF_BLOCK_UNUSED } from './_utils'
import {
BLOCK_UNUSED,
FOOTER_SIZE,
HEADER_SIZE,
PLATFORM_NONE,
SECTOR_SIZE,
PARENT_LOCATOR_ENTRIES,
} from '../_constants'
import { computeBatSize, sectorsToBytes, buildHeader, buildFooter, BUF_BLOCK_UNUSED } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { set as mapSetBit } from '../_bitmap'
import { set as mapSetBit, test as mapTestBit } from '../_bitmap'
import { VhdAbstract } from './VhdAbstract'
import assert from 'assert'
import getFirstAndLastBlocks from '../_getFirstAndLastBlocks'
@@ -71,23 +78,23 @@ export class VhdFile extends VhdAbstract {
return super.header
}
static async open(handler, path, { flags, checkSecondFooter = true } = {}) {
const fd = await handler.openFile(path, flags ?? 'r+')
static async open(handler, path) {
const fd = await handler.openFile(path, 'r+')
const vhd = new VhdFile(handler, fd)
// opening a file for reading does not trigger EISDIR as long as we don't really read from it:
// https://man7.org/linux/man-pages/man2/open.2.html
// EISDIR pathname refers to a directory and the access requested
// involved writing (that is, O_WRONLY or O_RDWR is set).
// reading the header ensures we have a well-formed file immediately
await vhd.readHeaderAndFooter(checkSecondFooter)
await vhd.readHeaderAndFooter()
return {
dispose: () => handler.closeFile(fd),
value: vhd,
}
}
static async create(handler, path, { flags } = {}) {
const fd = await handler.openFile(path, flags ?? 'wx')
static async create(handler, path) {
const fd = await handler.openFile(path, 'wx')
const vhd = new VhdFile(handler, fd)
return {
dispose: () => handler.closeFile(fd),
@@ -122,7 +129,7 @@ export class VhdFile extends VhdAbstract {
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const entry = header.parentLocatorEntry[i]
if (entry.platformCode !== PLATFORMS.NONE) {
if (entry.platformCode !== PLATFORM_NONE) {
end = Math.max(end, entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace))
}
}
@@ -170,8 +177,8 @@ export class VhdFile extends VhdAbstract {
const bufFooter = buf.slice(0, FOOTER_SIZE)
const bufHeader = buf.slice(FOOTER_SIZE)
const footer = unpackFooter(bufFooter)
const header = unpackHeader(bufHeader, footer)
const footer = buildFooter(bufFooter)
const header = buildHeader(bufHeader, footer)
if (checkSecondFooter) {
const size = await this._handler.getSize(this._path)
@@ -336,6 +343,47 @@ export class VhdFile extends VhdAbstract {
)
}
async coalesceBlock(child, blockId) {
const block = await child.readBlock(blockId)
const { bitmap, data } = block
debug(`coalesceBlock block=${blockId}`)
// For each sector of block data...
const { sectorsPerBlock } = child
let parentBitmap = null
for (let i = 0; i < sectorsPerBlock; i++) {
// If no changes on one sector, skip.
if (!mapTestBit(bitmap, i)) {
continue
}
let endSector = i + 1
// Count changed sectors.
while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
++endSector
}
// Write n sectors into parent.
debug(`coalesceBlock: write sectors=${i}...${endSector}`)
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this.readBlock(blockId, true)).bitmap
}
await this._writeBlockSectors(block, i, endSector, parentBitmap)
}
i = endSector
}
// Return the merged data size
return data.length
}
// Write a context footer. (At the end and beginning of a vhd file.)
async writeFooter(onlyEndFooter = false) {
const { footer } = this
@@ -449,8 +497,4 @@ export class VhdFile extends VhdAbstract {
header.parentLocatorEntry[parentLocatorId].platformDataOffset = position
}
}
async getSize() {
return await this._handler.getSize(this._path)
}
}
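The loop added to `VhdFile.coalesceBlock` groups dirty sectors into contiguous runs so that each run is written with a single `_writeBlockSectors` call (or `writeEntireBlock` when the whole block changed). The run detection, isolated as a sketch; `mapTestBit` is the bitmap test already imported in this file:

// returns [start, end) pairs of contiguous changed sectors in a block bitmap
function changedSectorRuns(bitmap, sectorsPerBlock) {
  const runs = []
  for (let i = 0; i < sectorsPerBlock; i++) {
    if (!mapTestBit(bitmap, i)) continue
    let end = i + 1
    while (end < sectorsPerBlock && mapTestBit(bitmap, end)) ++end
    runs.push([i, end])
    i = end
  }
  return runs
}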

View File

@@ -1,83 +0,0 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { SECTOR_SIZE, PLATFORMS } from '../_constants'
import { createRandomFile, convertFromRawToVhd } from '../tests/utils'
import { openVhd, chainVhd } from '..'
import { VhdSynthetic } from './VhdSynthetic'
let tempDir = null
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('It can read block and parent locator from a synthetic vhd', async () => {
const bigRawFileName = `/bigrandomfile`
await createRandomFile(`${tempDir}/${bigRawFileName}`, 8)
const bigVhdFileName = `/bigrandomfile.vhd`
await convertFromRawToVhd(`${tempDir}/${bigRawFileName}`, `${tempDir}/${bigVhdFileName}`)
const smallRawFileName = `/smallrandomfile`
await createRandomFile(`${tempDir}/${smallRawFileName}`, 4)
const smallVhdFileName = `/smallrandomfile.vhd`
await convertFromRawToVhd(`${tempDir}/${smallRawFileName}`, `${tempDir}/${smallVhdFileName}`)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
// ensure the two VHDs are linked, with the child of type DISK_TYPES.DIFFERENCING
await chainVhd(handler, bigVhdFileName, handler, smallVhdFileName, true)
const [smallVhd, bigVhd] = yield Disposable.all([
openVhd(handler, smallVhdFileName),
openVhd(handler, bigVhdFileName),
])
// add a parent locator
// this will also scramble the blocks inside the vhd files
await bigVhd.writeParentLocator({
id: 0,
platformCode: PLATFORMS.W2KU,
data: Buffer.from('I am in the big one'),
})
const syntheticVhd = new VhdSynthetic([smallVhd, bigVhd])
await syntheticVhd.readBlockAllocationTable()
expect(syntheticVhd.header.diskType).toEqual(bigVhd.header.diskType)
expect(syntheticVhd.header.parentTimestamp).toEqual(bigVhd.header.parentTimestamp)
// the first two blocks should come from the small one
const buf = Buffer.alloc(syntheticVhd.sectorsPerBlock * SECTOR_SIZE, 0)
let content = (await syntheticVhd.readBlock(0)).data
await handler.read(smallRawFileName, buf, 0)
expect(content).toEqual(buf)
content = (await syntheticVhd.readBlock(1)).data
await handler.read(smallRawFileName, buf, buf.length)
expect(content).toEqual(buf)
// the next ones from the big one
content = (await syntheticVhd.readBlock(2)).data
await handler.read(bigRawFileName, buf, buf.length * 2)
expect(content).toEqual(buf)
content = (await syntheticVhd.readBlock(3)).data
await handler.read(bigRawFileName, buf, buf.length * 3)
expect(content).toEqual(buf)
// the parent locator should be the one of the root vhd
const parentLocator = await syntheticVhd.readParentLocator(0)
expect(parentLocator.platformCode).toEqual(PLATFORMS.W2KU)
expect(Buffer.from(parentLocator.data, 'utf-8').toString()).toEqual('I am in the big one')
})
})

View File

@@ -1,93 +0,0 @@
import * as UUID from 'uuid'
import { asyncMap } from '@xen-orchestra/async-map'
import { VhdAbstract } from './VhdAbstract'
import { DISK_TYPES, FOOTER_SIZE, HEADER_SIZE } from '../_constants'
import assert from 'assert'
export class VhdSynthetic extends VhdAbstract {
#vhds = []
set header(_) {
throw new Error('Header is read only for VhdSynthetic')
}
get header() {
// this is the VHD we want to synthesize
const vhd = this.#vhds[0]
// this is the root VHD
const rootVhd = this.#vhds[this.#vhds.length - 1]
// data of our synthetic VHD
// TODO: set parentLocatorEntry-s in header
return {
...vhd.header,
tableOffset: FOOTER_SIZE + HEADER_SIZE,
parentTimestamp: rootVhd.header.parentTimestamp,
parentUnicodeName: rootVhd.header.parentUnicodeName,
parentUuid: rootVhd.header.parentUuid,
}
}
set footer(_) {
throw new Error('Footer is read only for VhdSynthetic')
}
get footer() {
// this is the root VHD
const rootVhd = this.#vhds[this.#vhds.length - 1]
return {
...this.#vhds[0].footer,
dataOffset: FOOTER_SIZE,
diskType: rootVhd.footer.diskType,
}
}
static async open(vhds) {
const vhd = new VhdSynthetic(vhds)
return {
dispose: () => {},
value: vhd,
}
}
/**
* @param {Array<VhdAbstract>} vhds the chain of Vhds used to compute this Vhd, from the deepest child (in position 0), to the root (in the last position)
* only the last one can have any type. The others must have type DISK_TYPES.DIFFERENCING (delta)
*/
constructor(vhds) {
assert(vhds.length > 0)
super()
this.#vhds = vhds
}
async readBlockAllocationTable() {
await asyncMap(this.#vhds, vhd => vhd.readBlockAllocationTable())
}
containsBlock(blockId) {
return this.#vhds.some(vhd => vhd.containsBlock(blockId))
}
async readHeaderAndFooter() {
const vhds = this.#vhds
await asyncMap(vhds, vhd => vhd.readHeaderAndFooter())
for (let i = 0, n = vhds.length - 1; i < n; ++i) {
const child = vhds[i]
const parent = vhds[i + 1]
assert.strictEqual(child.footer.diskType, DISK_TYPES.DIFFERENCING)
assert.strictEqual(UUID.stringify(child.header.parentUuid), UUID.stringify(parent.footer.uuid))
}
}
async readBlock(blockId, onlyBitmap = false) {
const index = this.#vhds.findIndex(vhd => vhd.containsBlock(blockId))
// only read the content of the first vhd containing this block
return await this.#vhds[index].readBlock(blockId, onlyBitmap)
}
_readParentLocatorData(id) {
return this.#vhds[this.#vhds.length - 1]._readParentLocatorData(id)
}
}
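A usage sketch mirroring the removed test: open a child/parent chain, wrap it in a `VhdSynthetic` (deepest child first, root last) and read through it. File names here are illustrative:

await Disposable.use(async function* () {
  const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
  const [child, parent] = yield Disposable.all([
    openVhd(handler, 'child.vhd'),
    openVhd(handler, 'parent.vhd'),
  ])
  const synthetic = new VhdSynthetic([child, parent])
  await synthetic.readBlockAllocationTable()
  // served by the first VHD of the chain containing block 0
  const { data } = await synthetic.readBlock(0)
})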

View File

@@ -6,11 +6,6 @@ import checkHeader from '../_checkHeader'
export const computeBatSize = entries => sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
export const computeSectorsPerBlock = blockSize => blockSize / SECTOR_SIZE
// one bit per sector
export const computeBlockBitmapSize = blockSize => computeSectorsPerBlock(blockSize) >>> 3
export const computeSectorOfBitmap = blockSize => sectorsRoundUpNoZero(computeBlockBitmapSize(blockSize))
// Sectors conversions.
export const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
export const sectorsToBytes = sectors => sectors * SECTOR_SIZE
@@ -32,7 +27,7 @@ BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
* @param {Object} footer
* @returns {Object} the parsed header
*/
export const unpackHeader = (bufHeader, footer) => {
export const buildHeader = (bufHeader, footer) => {
assertChecksum('header', bufHeader, fuHeader)
const header = fuHeader.unpack(bufHeader)
@@ -48,7 +43,7 @@ export const unpackHeader = (bufHeader, footer) => {
* @returns {Object} the parsed footer
*/
export const unpackFooter = bufFooter => {
export const buildFooter = bufFooter => {
assertChecksum('footer', bufFooter, fuFooter)
const footer = fuFooter.unpack(bufFooter)

View File

@@ -12,27 +12,19 @@ export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec
export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'
export const DISK_TYPES = {
__proto__: null,
FIXED: 2,
DYNAMIC: 3,
DIFFERENCING: 4,
}
export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4
export const PARENT_LOCATOR_ENTRIES = 8
export const PLATFORMS = {
__proto__: null,
NONE: 0,
WI2R: 0x57693272,
WI2K: 0x5769326b,
W2RU: 0x57327275,
W2KU: 0x57326b75,
MAC: 0x4d616320,
MACX: 0x4d616358,
}
export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
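These numeric platform codes are the four ASCII characters of the spec's platform tags read as a big-endian uint32, which is why the refactor above is purely cosmetic, for instance:

Buffer.from('W2ku').readUInt32BE(0) === 0x57326b75 // PLATFORM_W2KU / PLATFORMS.W2KU
Buffer.from('Mac ').readUInt32BE(0) === 0x4d616320 // PLATFORM_MAC / PLATFORMS.MAC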

View File

@@ -4,17 +4,17 @@ import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
CREATOR_APPLICATION,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPES,
DISK_TYPE_FIXED,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PLATFORMS,
PLATFORM_WI2K,
} from './_constants'
export function createFooter(size, timestamp, geometry, dataOffset, diskType = DISK_TYPES.FIXED) {
export function createFooter(size, timestamp, geometry, dataOffset, diskType = DISK_TYPE_FIXED) {
const footer = fuFooter.pack({
cookie: FOOTER_COOKIE,
features: 2,
@@ -22,7 +22,7 @@ export function createFooter(size, timestamp, geometry, dataOffset, diskType = D
dataOffset,
timestamp,
creatorApplication: CREATOR_APPLICATION,
creatorHostOs: PLATFORMS.WI2K, // it looks like everybody is using Wi2k
creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
originalSize: size,
currentSize: size,
diskGeometry: geometry,

View File

@@ -32,24 +32,34 @@ test('resolve return the path in argument for a non alias file ', async () => {
test('resolve gets the path of the target file for an alias', async () => {
await Disposable.use(async function* () {
// same directory
const handler = yield getSyncedHandler({ url: 'file:///' })
const tempDirFomRemoteUrl = tempDir.slice(1) // remove the / which is included in the remote url
const alias = `${tempDirFomRemoteUrl}/alias.alias.vhd`
await handler.writeFile(alias, 'target.vhd')
expect(await resolveAlias(handler, alias)).toEqual(`${tempDirFomRemoteUrl}/target.vhd`)
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
await handler.mkdir(`alias`)
const aliasPath = 'alias/alias.alias.vhd'
const testOneCombination = async ({ targetPath, targetContent }) => {
await handler.writeFile(aliasPath, targetPath, { flags: 'w' })
const resolved = await resolveAlias(handler, aliasPath)
expect(resolved).toEqual(targetContent)
await handler.unlink(aliasPath)
}
// the alias contains the relative path to the target file. The resolved value is the full path from the root of the remote
const combinations = [
{ targetPath: `../targets.vhd`, targetContent: `targets.vhd` },
{ targetPath: `targets.vhd`, targetContent: `alias/targets.vhd` },
{ targetPath: `sub/targets.vhd`, targetContent: `alias/sub/targets.vhd` },
{ targetPath: `../sibling/targets.vhd`, targetContent: `sibling/targets.vhd` },
]
// different directory
await handler.mkdir(`${tempDirFomRemoteUrl}/sub/`)
await handler.writeFile(alias, 'sub/target.vhd', { flags: 'w' })
expect(await resolveAlias(handler, alias)).toEqual(`${tempDirFomRemoteUrl}/sub/target.vhd`)
for (const { targetPath, targetContent } of combinations) {
await testOneCombination({ targetPath, targetContent })
}
})
})
test('resolve throws an error for an alias to an alias', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
const alias = `${tempDir}/alias.alias.vhd`
const target = `${tempDir}/target.alias.vhd`
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const alias = 'alias.alias.vhd'
const target = 'target.alias.vhd'
await handler.writeFile(alias, target)
expect(async () => await resolveAlias(handler, alias)).rejects.toThrow(Error)
})

View File

@@ -33,7 +33,7 @@ export const fuFooter = fu.struct([
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to DYNAMIC/DIFFERENCING.
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.byte('uuid', 16), // 68
fu.char('saved'), // 84

View File

@@ -1,30 +1,28 @@
import { dirname, relative } from 'path'
import { openVhd } from './'
import { DISK_TYPES } from './_constants'
import { Disposable } from 'promise-toolbox'
import { VhdFile } from './'
import { DISK_TYPE_DIFFERENCING } from './_constants'
export default async function chain(parentHandler, parentPath, childHandler, childPath, force = false) {
await Disposable.use(
[openVhd(parentHandler, parentPath), openVhd(childHandler, childPath)],
async ([parentVhd, childVhd]) => {
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
const parentVhd = new VhdFile(parentHandler, parentPath)
const childVhd = new VhdFile(childHandler, childPath)
if (footer.diskType !== DISK_TYPES.DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = DISK_TYPES.DIFFERENCING
}
await childVhd.readBlockAllocationTable()
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
await childVhd.writeHeader()
await childVhd.writeFooter()
if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
)
footer.diskType = DISK_TYPE_DIFFERENCING
}
await Promise.all([childVhd.readBlockAllocationTable(), parentVhd.readHeaderAndFooter()])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
await childVhd.writeHeader()
await childVhd.writeFooter()
}
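The parent name written into the child is the path of the parent relative to the child's directory, so a chain stays valid when the pair is moved together. A quick worked example with illustrative paths:

import { dirname, relative } from 'path'

relative(dirname('backups/child.vhd'), 'backups/parent.vhd') // 'parent.vhd'
relative(dirname('backups/child.vhd'), 'base/parent.vhd') // '../base/parent.vhd'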

View File

@@ -1,14 +1,16 @@
import { openVhd } from '.'
import { VhdFile } from '.'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
import { DISK_TYPES } from './_constants'
import { Disposable } from 'promise-toolbox'
import { DISK_TYPE_DYNAMIC } from './_constants'
export default async function checkChain(handler, path) {
await Disposable.use(function* () {
let vhd
do {
vhd = yield openVhd(handler, path)
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== DISK_TYPES.DYNAMIC)
})
while (true) {
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
}

View File

@@ -1,11 +1,17 @@
import assert from 'assert'
import { DISK_TYPES, FILE_FORMAT_VERSION, FOOTER_COOKIE, FOOTER_SIZE } from './_constants'
import {
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
} from './_constants'
export default footer => {
assert.strictEqual(footer.cookie, FOOTER_COOKIE)
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(footer.diskType === DISK_TYPES.DIFFERENCING || footer.diskType === DISK_TYPES.DYNAMIC)
assert(footer.diskType === DISK_TYPE_DIFFERENCING || footer.diskType === DISK_TYPE_DYNAMIC)
}

View File

@@ -0,0 +1,31 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { VhdFile } from '.'
export default asyncIteratorToStream(async function* (handler, path) {
const fd = await handler.openFile(path, 'r')
try {
const vhd = new VhdFile(handler, fd)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
const {
footer: { currentSize },
header: { blockSize },
} = vhd
const nFullBlocks = Math.floor(currentSize / blockSize)
const nLeftoverBytes = currentSize % blockSize
const emptyBlock = Buffer.alloc(blockSize)
for (let i = 0; i < nFullBlocks; ++i) {
yield vhd.containsBlock(i) ? (await vhd.readBlock(i)).data : emptyBlock
}
if (nLeftoverBytes !== 0) {
yield (vhd.containsBlock(nFullBlocks) ? (await vhd.readBlock(nFullBlocks)).data : emptyBlock).slice(
0,
nLeftoverBytes
)
}
} finally {
await handler.closeFile(fd)
}
})
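The full-block/leftover split matters because `currentSize` is rarely a multiple of `blockSize`: with a 5 MiB disk and 2 MiB blocks the stream yields two full blocks and then a 1 MiB slice, which is exactly what the 'can stream content' test earlier in this diff checks. As arithmetic:

const currentSize = 5 * 1024 * 1024 // 5 MiB disk
const blockSize = 2 * 1024 * 1024 // 2 MiB blocks
Math.floor(currentSize / blockSize) // 2 full blocks
currentSize % blockSize // 1 MiB leftover, yielded as a truncated slice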

View File

@@ -7,7 +7,7 @@ import { createFooter, createHeader } from './_createFooterHeader'
import {
BLOCK_UNUSED,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPES,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
@@ -73,7 +73,7 @@ export default async function createReadableStream(diskSize, fragmentSize, fragm
const firstBlockPosition = batPosition + tablePhysicalSizeBytes
const geometry = computeGeometryForSize(diskSize)
const actualSize = geometry.actualSize
const footer = createFooter(actualSize, Math.floor(Date.now() / 1000), geometry, FOOTER_SIZE, DISK_TYPES.DYNAMIC)
const footer = createFooter(actualSize, Math.floor(Date.now() / 1000), geometry, FOOTER_SIZE, DISK_TYPE_DYNAMIC)
const header = createHeader(maxTableEntries, batPosition, VHD_BLOCK_SIZE_BYTES)
const bitmapSize = Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)

View File

@@ -0,0 +1,42 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { checkFile, createRandomFile, convertFromRawToVhd } from './tests/utils'
import { createSyntheticStream } from '.'
let tempDir = null
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test.only('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredVhdFileName = `${tempDir}/recovered.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
await checkFile(vhdFileName)
const handler = getHandler({ url: 'file://' })
const stream = await createSyntheticStream(handler, vhdFileName)
const expectedVhdSize = (await fs.stat(vhdFileName)).size
expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
await pFromCallback(cb => pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb))
await checkFile(recoveredVhdFileName)
const stats = await fs.stat(recoveredVhdFileName)
expect(stats.size).toEqual(expectedVhdSize)
await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
})

View File

@@ -0,0 +1,165 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { createLogger } from '@xen-orchestra/log'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
import { VhdFile } from '.'
import { BLOCK_UNUSED, DISK_TYPE_DYNAMIC, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const { warn } = createLogger('vhd-lib:createSyntheticStream')
export default async function createSyntheticStream(handler, paths) {
const fds = []
const cleanup = () => {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
warn('error while closing file', {
error,
fd: fds[i],
})
})
}
}
try {
const vhds = []
const open = async path => {
const fd = await handler.openFile(path, 'r')
fds.push(fd)
const vhd = new VhdFile(handler, fd)
vhds.push(vhd)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
return vhd
}
if (typeof paths === 'string') {
let path = paths
let vhd
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
} else {
for (const path of paths) {
await open(path)
}
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// this is the root VHD
const rootVhd = vhds[nVhds - 1]
// data of our synthetic VHD
// TODO: set parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: FOOTER_SIZE + HEADER_SIZE,
parentTimestamp: rootVhd.header.parentTimestamp,
parentUnicodeName: rootVhd.header.parentUnicodeName,
parentUuid: rootVhd.header.parentUuid,
}
const bat = Buffer.allocUnsafe(vhd.batSize)
let footer = {
...vhd.footer,
dataOffset: FOOTER_SIZE,
diskType: rootVhd.footer.diskType,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
let blockOffset = Math.ceil((header.tableOffset + bat.length) / SECTOR_SIZE)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
const fileSize = blockOffset * SECTOR_SIZE + FOOTER_SIZE
const iterator = function* () {
try {
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
// TODO: for generic usage the bitmap needs to be properly computed for each block
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function* (iVhd, i, n) {
const vhd = vhds[iVhd]
const isRootVhd = vhd === rootVhd
if (!vhd.containsBlock(iBlock)) {
if (isRootVhd) {
yield Buffer.alloc((n - i) * SECTOR_SIZE)
} else {
yield* emitBlockSectors(iVhd + 1, i, n)
}
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd.readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (isRootVhd) {
yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
} else {
yield* emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield* emitBlockSectors(owner, 0, sectorsPerBlockData)
}
yield footer
} finally {
cleanup()
}
}
const stream = asyncIteratorToStream(iterator())
stream.length = fileSize
return stream
} catch (e) {
cleanup()
throw e
}
}
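`emitBlockSectors` resolves each run of sectors against the chain: data comes from the first VHD whose bitmap has the sector set, falling through toward the root, where sectors allocated nowhere become zeroes. The per-sector resolution, as a simplified sketch assuming the same `mapTestBit` helper imported above:

// bitmaps[0] belongs to the deepest child, the last entry to the root
function sectorOwner(bitmaps, iSector) {
  for (let i = 0; i < bitmaps.length; i++) {
    if (mapTestBit(bitmaps[i], iSector)) {
      return i
    }
  }
  return -1 // allocated nowhere: the root contributes zeroes
}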

View File

@@ -1,168 +0,0 @@
import { VhdDirectory } from './'
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { readChunk } from '@vates/read-chunk'
import assert from 'assert'
import { Disposable } from 'promise-toolbox'
import { unpackFooter, unpackHeader, computeBlockBitmapSize } from './Vhd/_utils'
import { asyncEach } from '@vates/async-each'
const cappedBufferConcat = (buffers, maxSize) => {
let buffer = Buffer.concat(buffers)
if (buffer.length > maxSize) {
buffer = buffer.slice(buffer.length - maxSize)
}
return buffer
}
async function* parse(stream) {
let bytesRead = 0
// handle empty space between elements
// ensure we read the stream in order
async function read(offset, size) {
assert(bytesRead <= offset, `offset is ${offset} but we already read ${bytesRead} bytes`)
if (bytesRead < offset) {
// empty spaces
await read(bytesRead, offset - bytesRead)
}
const buf = await readChunk(stream, size)
assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
bytesRead += size
return buf
}
const bufFooter = await read(0, FOOTER_SIZE)
const footer = unpackFooter(bufFooter)
yield { type: 'footer', footer, offset: 0 }
const bufHeader = await read(FOOTER_SIZE, HEADER_SIZE)
const header = unpackHeader(bufHeader, footer)
yield { type: 'header', header, offset: SECTOR_SIZE }
const blockSize = header.blockSize
assert.strictEqual(blockSize % SECTOR_SIZE, 0)
const blockBitmapSize = computeBlockBitmapSize(blockSize)
const blockAndBitmapSize = blockBitmapSize + blockSize
const index = []
for (const parentLocatorId in header.parentLocatorEntry) {
const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
// empty parent locator entry, does not exist in the content
if (parentLocatorEntry.platformDataSpace === 0) {
continue
}
index.push({
...parentLocatorEntry,
type: 'parentLocator',
offset: parentLocatorEntry.platformDataOffset,
size: parentLocatorEntry.platformDataLength,
id: parentLocatorId,
})
}
const batOffset = header.tableOffset
const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
index.push({
type: 'bat',
offset: batOffset,
size: batSize,
})
// sometimes some parent locators are stored before the BAT
index.sort((a, b) => a.offset - b.offset)
while (index.length > 0) {
const item = index.shift()
const buffer = await read(item.offset, item.size)
if (item.type === 'bat') {
// found the BAT: read it and add the blocks to the index
for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
// unallocated block, no need to export it
if (batEntrySector !== BLOCK_UNUSED) {
const batEntryBytes = batEntrySector * SECTOR_SIZE
// ensure the block is not before the bat
assert.ok(batEntryBytes >= batOffset + batSize)
index.push({
type: 'block',
id: blockCounter,
offset: batEntryBytes,
size: blockAndBitmapSize,
})
}
}
// sort the index again to ensure blocks and parent locators are in the right order
index.sort((a, b) => a.offset - b.offset)
} else {
yield { ...item, buffer }
}
}
/**
* the second footer is at filesize - 512; there can be empty space between the last block
* and the start of the footer
*
* we read till the end of the stream, and use the last 512 bytes as the footer
*/
const bufFooterEnd = await readLastSector(stream)
assert(bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
}
function readLastSector(stream) {
return new Promise((resolve, reject) => {
let bufFooterEnd = Buffer.alloc(0)
stream.on('data', chunk => {
if (chunk.length > 0) {
bufFooterEnd = cappedBufferConcat([bufFooterEnd, chunk], SECTOR_SIZE)
}
})
stream.on('end', () => resolve(bufFooterEnd))
stream.on('error', reject)
})
}
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency }) {
const vhd = yield VhdDirectory.create(handler, path)
await asyncEach(
parse(inputStream),
async function (item) {
switch (item.type) {
case 'footer':
vhd.footer = item.footer
break
case 'header':
vhd.header = item.header
break
case 'parentLocator':
await vhd.writeParentLocator({ ...item, data: item.buffer })
break
case 'block':
await vhd.writeEntireBlock(item)
break
default:
throw new Error(`unhandled type of block generated by parser : ${item.type} while generating ${path}`)
}
},
{
concurrency,
}
)
await Promise.all([vhd.writeFooter(), vhd.writeHeader(), vhd.writeBlockAllocationTable()])
})
export async function createVhdDirectoryFromStream(handler, path, inputStream, { validator, concurrency = 16 } = {}) {
try {
await buildVhd(handler, path, inputStream, { concurrency })
if (validator !== undefined) {
await validator.call(this, path)
}
} catch (error) {
// cleanup on error
await handler.rmTree(path)
throw error
}
}
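A minimal usage sketch, assuming `handler` is an `@xen-orchestra/fs` handler and `inputStream` a readable VHD stream; the optional `validator` runs before the directory is kept, and any failure triggers the `rmTree` cleanup shown above. The path and the use of `checkFile` are illustrative:

await createVhdDirectoryFromStream(handler, 'xo-vm-backups/disk.vhd', inputStream, {
  concurrency: 16,
  // illustrative: reject the import if an external check fails
  validator: async path => {
    await checkFile(path)
  },
})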

View File

@@ -1,15 +1,14 @@
export { default as chainVhd } from './chain'
export { default as checkFooter } from './checkFooter'
export { default as checkVhdChain } from './checkChain'
export { default as createContentStream } from './createContentStream'
export { default as createReadableRawStream } from './createReadableRawStream'
export { default as createReadableSparseStream } from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { createVhdDirectoryFromStream } from './createVhdDirectoryFromStream'
export { default as mergeVhd } from './merge'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
export { openVhd } from './openVhd'
export { VhdAbstract } from './Vhd/VhdAbstract'
export { VhdDirectory } from './Vhd/VhdDirectory'
export { VhdFile } from './Vhd/VhdFile'
export { VhdSynthetic } from './Vhd/VhdSynthetic'
export * as Constants from './_constants'

View File

@@ -1,5 +1,6 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
@@ -8,7 +9,8 @@ import { pFromCallback } from 'promise-toolbox'
import { VhdFile, chainVhd, mergeVhd as vhdMerge } from './index'
import { checkFile, createRandomFile, convertFromRawToVhd } from './tests/utils'
import { SECTOR_SIZE } from './_constants'
import { checkFile, createRandomFile, convertFromRawToVhd, recoverRawContent } from './tests/utils'
let tempDir = null
@@ -22,136 +24,55 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('merge works in normal cases', async () => {
const mbOfFather = 8
const mbOfChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
const childRandomFileName = `${tempDir}/small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const child1FileName = `${tempDir}/child1.vhd`
const handler = getHandler({ url: 'file://' })
test('coalesce works in normal cases', async () => {
const mbOfRandom = 5
const randomFilePath = `${tempDir}/randomfile`
const random2FilePath = `${tempDir}/randomfile2`
const smallRandomFilePath = `${tempDir}/small_randomfile`
const parentFilePath = `${tempDir}/parent.vhd`
const child1FilePath = `${tempDir}/child1.vhd`
const child2FilePath = `${tempDir}/child2.vhd`
const recoveredFilePath = `${tempDir}/recovered`
await createRandomFile(randomFilePath, mbOfRandom)
await createRandomFile(smallRandomFilePath, Math.ceil(mbOfRandom / 2))
await execa('qemu-img', ['create', '-fvpc', parentFilePath, mbOfRandom + 1 + 'M'])
await checkFile(parentFilePath)
await convertFromRawToVhd(randomFilePath, child1FilePath)
const handler = getHandler({ url: `file://${tempDir}/` })
await execa('vhd-util', ['snapshot', '-n', child2FilePath, '-p', child1FilePath])
const vhd = new VhdFile(handler, 'child2.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
await createRandomFile(parentRandomFileName, mbOfFather)
await convertFromRawToVhd(parentRandomFileName, parentFileName)
await createRandomFile(childRandomFileName, mbOfChildren)
await convertFromRawToVhd(childRandomFileName, child1FileName)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
// merge
await vhdMerge(handler, parentFileName, handler, child1FileName)
// check that vhd is still valid
await checkFile(parentFileName)
const parentVhd = new VhdFile(handler, parentFileName)
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
let offset = 0
// check that the data are the same as the source
for await (const block of parentVhd.blocks()) {
const blockContent = block.data
const file = offset < mbOfChildren * 1024 * 1024 ? childRandomFileName : parentRandomFileName
const buffer = Buffer.alloc(blockContent.length)
const fd = await fs.open(file, 'r')
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
offset += parentVhd.header.blockSize
}
})
test('it can resume a merge', async () => {
const mbOfFather = 8
const mbOfChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
const childRandomFileName = `${tempDir}/small_randomfile`
const handler = getHandler({ url: `file://${tempDir}` })
await createRandomFile(`${tempDir}/randomfile`, mbOfFather)
await convertFromRawToVhd(`${tempDir}/randomfile`, `${tempDir}/parent.vhd`)
const parentVhd = new VhdFile(handler, 'parent.vhd')
await parentVhd.readHeaderAndFooter()
await createRandomFile(`${tempDir}/small_randomfile`, mbOfChildren)
await convertFromRawToVhd(`${tempDir}/small_randomfile`, `${tempDir}/child1.vhd`)
const originalSize = await handler._getSize('randomfile')
await checkFile(child1FilePath)
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
const childVhd = new VhdFile(handler, 'child1.vhd')
await childVhd.readHeaderAndFooter()
await handler.writeFile(
'.parent.vhd.merge.json',
JSON.stringify({
parent: {
header: parentVhd.header.checksum,
},
child: {
header: 'NOT CHILD HEADER ',
},
})
)
// expect merge to fail since child header is not ok
await expect(async () => await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()
await handler.unlink('.parent.vhd.merge.json')
await handler.writeFile(
'.parent.vhd.merge.json',
JSON.stringify({
parent: {
header: 'NOT PARENT HEADER',
},
child: {
header: childVhd.header.checksum,
},
})
)
// expect merge to fail since parent header is not ok
await expect(async () => await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()
// break the end footer of parent
const size = await handler.getSize('parent.vhd')
const fd = await handler.openFile('parent.vhd', 'r+')
const buffer = Buffer.alloc(512, 0)
// add a fake footer at the end
handler.write(fd, buffer, size)
await handler.closeFile(fd)
// checking the VHD should now fail
await expect(async () => await parentVhd.readHeaderAndFooter()).rejects.toThrow()
await handler.unlink('.parent.vhd.merge.json')
await handler.writeFile(
'.parent.vhd.merge.json',
JSON.stringify({
parent: {
header: parentVhd.header.checksum,
},
child: {
header: childVhd.header.checksum,
},
currentBlock: 1,
})
)
// really merge
await checkFile(child1FilePath)
await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
await checkFile(child2FilePath)
const smallRandom = await fs.readFile(smallRandomFilePath)
const newVhd = new VhdFile(handler, 'child2.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile(child2FilePath)
await checkFile(child1FilePath)
await checkFile(parentFilePath)
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
// reload header, footer and block allocation table; they should succeed
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
let offset = 0
// check that the data are the same as the source
for await (const block of parentVhd.blocks()) {
const blockContent = block.data
// the first block is marked as already merged and should not be modified
// the second block should come from the child
// the remaining two blocks exist only in the parent
const file = block.id === 1 ? childRandomFileName : parentRandomFileName
const buffer = Buffer.alloc(blockContent.length)
const fd = await fs.open(file, 'r')
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
offset += parentVhd.header.blockSize
await checkFile(parentFilePath)
await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
await checkFile(child2FilePath)
await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
await checkFile(parentFilePath)
await recoverRawContent(parentFilePath, recoveredFilePath, originalSize)
await execa('cp', [randomFilePath, random2FilePath])
const fd = await fs.open(random2FilePath, 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
expect(await fs.readFile(recoveredFilePath)).toEqual(await fs.readFile(random2FilePath))
})

View File

@@ -5,10 +5,9 @@ import noop from './_noop'
import { createLogger } from '@xen-orchestra/log'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { openVhd } from '.'
import { VhdFile } from '.'
import { basename, dirname } from 'path'
import { DISK_TYPES } from './_constants'
import { Disposable } from 'promise-toolbox'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
const { warn } = createLogger('vhd-lib:merge')
@@ -24,99 +23,109 @@ export default limitConcurrency(2)(async function merge(
) {
const mergeStatePath = dirname(parentPath) + '/' + '.' + basename(parentPath) + '.merge.json'
return await Disposable.use(async function* () {
let mergeState = await parentHandler.readFile(mergeStatePath).catch(error => {
if (error.code !== 'ENOENT') {
throw error
}
// no merge state in case of missing file
})
// during merging, the end footer of the parent can be overwritten by new blocks,
// so it can only serve as a VHD health check when no merge is in progress
const parentVhd = yield openVhd(parentHandler, parentPath, {
flags: 'r+',
checkSecondFooter: mergeState === undefined,
})
const childVhd = yield openVhd(childHandler, childPath)
if (mergeState !== undefined) {
mergeState = JSON.parse(mergeState)
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new VhdFile(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new VhdFile(childHandler, childFd)
// ensure the correct merge will be continued
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
} else {
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(parentDiskType === DISK_TYPES.DIFFERENCING || parentDiskType === DISK_TYPES.DYNAMIC)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
}
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockAllocationTable(), childVhd.readBlockAllocationTable()])
const { maxTableEntries } = childVhd.header
if (mergeState === undefined) {
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
mergeState = {
child: { header: childVhd.header.checksum },
parent: { header: parentVhd.header.checksum },
currentBlock: 0,
mergedDataSize: 0,
}
// find the first allocated block for the two following loops
while (mergeState.currentBlock < maxTableEntries && !childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
}
// count the number of allocated blocks
let nBlocks = 0
for (let block = mergeState.currentBlock; block < maxTableEntries; block++) {
if (childVhd.containsBlock(block)) {
nBlocks += 1
}
}
onProgress({ total: nBlocks, done: 0 })
// merges blocks
for (let i = 0; i < nBlocks; ++i, ++mergeState.currentBlock) {
while (!childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
await parentHandler.writeFile(mergeStatePath, JSON.stringify(mergeState), { flags: 'w' }).catch(warn)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, mergeState.currentBlock)
onProgress({
total: nBlocks,
done: i + 1,
let mergeState = await parentHandler.readFile(mergeStatePath).catch(error => {
if (error.code !== 'ENOENT') {
throw error
}
// no merge state in case of missing file
})
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(
// don't check that the VHD is complete when recovering a merge
mergeState === undefined
),
childVhd.readHeaderAndFooter(),
])
if (mergeState !== undefined) {
mergeState = JSON.parse(mergeState)
// ensure the correct merge will be continued
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
} else {
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(parentDiskType === DISK_TYPE_DIFFERENCING || parentDiskType === DISK_TYPE_DYNAMIC)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
}
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockAllocationTable(), childVhd.readBlockAllocationTable()])
const { maxTableEntries } = childVhd.header
if (mergeState === undefined) {
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
mergeState = {
child: { header: childVhd.header.checksum },
parent: { header: parentVhd.header.checksum },
currentBlock: 0,
mergedDataSize: 0,
}
// find the first allocated block for the two following loops
while (mergeState.currentBlock < maxTableEntries && !childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
}
// count the number of allocated blocks
let nBlocks = 0
for (let block = mergeState.currentBlock; block < maxTableEntries; block++) {
if (childVhd.containsBlock(block)) {
nBlocks += 1
}
}
onProgress({ total: nBlocks, done: 0 })
// merges blocks
for (let i = 0; i < nBlocks; ++i, ++mergeState.currentBlock) {
while (!childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
await parentHandler.writeFile(mergeStatePath, JSON.stringify(mergeState), { flags: 'w' }).catch(warn)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, mergeState.currentBlock)
onProgress({
total: nBlocks,
done: i + 1,
})
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergeState.mergedDataSize
} finally {
await childHandler.closeFile(childFd)
}
// some blocks may have been created or moved in the parent: write the BAT
await parentVhd.writeBlockAllocationTable()
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
// should be a disposable
} finally {
parentHandler.unlink(mergeStatePath).catch(warn)
return mergeState.mergedDataSize
})
await parentHandler.closeFile(parentFd)
}
})
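
For context, a hedged sketch of invoking the merge above; `getHandler` and the exact options shape are assumptions modeled on the tests. If a previous run left a `.parent.vhd.merge.json` state file next to the parent, the merge resumes from its `currentBlock` instead of starting over:

```js
import { getHandler } from '@xen-orchestra/fs'
import { mergeVhd } from 'vhd-lib'

const handler = getHandler({ url: 'file:///tmp' })
// merges child1.vhd into parent.vhd, reporting progress block by block
const mergedDataSize = await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd', {
  onProgress: ({ done, total }) => console.log(`merged ${done}/${total} blocks`),
})
```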

View File

@@ -29,16 +29,16 @@ test('It opens a vhd file ( alias or not)', async () => {
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' })
const vhd = yield openVhd(handler, vhdFileName)
const handler = yield getSyncedHandler({ url: `file://${tempDir}/` })
const vhd = yield openVhd(handler, 'randomfile.vhd')
expect(vhd.header.cookie).toEqual('cxsparse')
expect(vhd.footer.cookie).toEqual('conectix')
const aliasFileName = `${tempDir}/out.alias.vhd`
await VhdAbstract.createAlias(handler, aliasFileName, vhdFileName)
const alias = yield openVhd(handler, aliasFileName)
await VhdAbstract.createAlias(handler, 'out.alias.vhd', 'randomfile.vhd')
const alias = yield openVhd(handler, 'out.alias.vhd')
expect(alias.header.cookie).toEqual('cxsparse')
expect(alias.footer.cookie).toEqual('conectix')
expect(alias._path?.path).toEqual('/randomfile.vhd')
})
})
@@ -48,15 +48,15 @@ test('It opens a vhd directory', async () => {
await createRandomVhdDirectory(vhdDirectory, initalSize)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' })
const vhd = yield openVhd(handler, vhdDirectory)
const handler = yield getSyncedHandler({ url: `file://${tempDir}/` })
const vhd = yield openVhd(handler, 'randomfile.dir')
expect(vhd.header.cookie).toEqual('cxsparse')
expect(vhd.footer.cookie).toEqual('conectix')
const aliasFileName = `${tempDir}/out.alias.vhd`
await VhdAbstract.createAlias(handler, aliasFileName, vhdDirectory)
const alias = yield openVhd(handler, aliasFileName)
await VhdAbstract.createAlias(handler, 'out.alias.vhd', 'randomfile.dir')
const alias = yield openVhd(handler, 'out.alias.vhd')
expect(alias.header.cookie).toEqual('cxsparse')
expect(alias.footer.cookie).toEqual('conectix')
expect(alias._path).toEqual('randomfile.dir')
})
})

View File

@@ -1,14 +1,14 @@
import { resolveAlias } from './_resolveAlias'
import { VhdFile, VhdDirectory } from './'
export async function openVhd(handler, path, opts) {
export async function openVhd(handler, path) {
const resolved = await resolveAlias(handler, path)
try {
return await VhdFile.open(handler, resolved, opts)
return await VhdFile.open(handler, resolved)
} catch (e) {
if (e.code !== 'EISDIR') {
throw e
}
return await VhdDirectory.open(handler, resolved, opts)
return await VhdDirectory.open(handler, resolved)
}
}
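
A short usage sketch of the fallback above: `openVhd` first tries `VhdFile.open` and falls back to `VhdDirectory.open` on `EISDIR`, so callers need not know the on-disk format in advance. Imports and paths are assumptions mirroring the tests:

```js
import { Disposable } from 'promise-toolbox'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd } from 'vhd-lib'

await Disposable.use(async function* () {
  const handler = yield getSyncedHandler({ url: 'file:///tmp' })
  const vhd = yield openVhd(handler, 'some.vhd') // plain file, directory or alias
  console.log(vhd.footer.cookie) // 'conectix'
})
```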

View File

@@ -42,7 +42,6 @@ export async function convertFromVmdkToRaw(vmdkName, rawName) {
}
export async function recoverRawContent(vhdName, rawName, originalSize) {
// todo should use createContentStream
await checkFile(vhdName)
await convertFromVhdToRaw(vhdName, rawName)
if (originalSize !== undefined) {
@@ -50,9 +49,12 @@ export async function recoverRawContent(vhdName, rawName, originalSize) {
}
}
// @todo how can I call vhd-cli copy from here?
export async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
fs.mkdirp(path)
export async function createRandomVhdDirectory(path, sizeMB) {
fs.mkdir(path)
const rawFileName = `${path}/temp.raw`
await createRandomFile(rawFileName, sizeMB)
const vhdFileName = `${path}/vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const srcVhd = await fs.open(vhdFileName, 'r')
@@ -73,26 +75,13 @@ export async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
// copy blocks
const srcRaw = await fs.open(rawFileName, 'r')
const blockDataSize = 512 * 4096
// make a block bitmap full of 1s, marking all sectors of the block as used
const bitmap = Buffer.alloc(512, 255)
const bitmap = Buffer.alloc(4096)
await fs.mkdir(path + '/blocks/')
await fs.mkdir(path + '/blocks/0/')
const stats = await fs.stat(rawFileName)
const sizeMB = stats.size / 1024 / 1024
await fs.mkdir(path + '/blocks/1/')
for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) {
const blockData = Buffer.alloc(blockDataSize)
await fs.read(srcRaw, blockData, offset)
await fs.writeFile(path + '/blocks/0/' + i, Buffer.concat([bitmap, blockData]))
await fs.writeFile(path + '/blocks/1/' + i, Buffer.concat([bitmap, blockData]))
}
await fs.close(srcRaw)
}
export async function createRandomVhdDirectory(path, sizeMB) {
fs.mkdirp(path)
const rawFileName = `${path}/temp.raw`
await createRandomFile(rawFileName, sizeMB)
const vhdFileName = `${path}/temp.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
await convertToVhdDirectory(rawFileName, vhdFileName, path)
}
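
For orientation, the layout of one block file written by the helper above, assuming the 2 MiB block size it uses:

```js
// a vhd-directory block file = 512-byte sector bitmap + raw block data;
// a bitmap of all 0xFF marks every sector of the block as allocated
const SECTOR_SIZE = 512
const BLOCK_DATA_SIZE = SECTOR_SIZE * 4096 // 2 MiB, matching blockDataSize above
const bitmap = Buffer.alloc(SECTOR_SIZE, 255)
const blockData = Buffer.alloc(BLOCK_DATA_SIZE) // illustrative payload
const blockFile = Buffer.concat([bitmap, blockData]) // contents of blocks/<dir>/<i>
```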

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^2.0.1"
"vhd-lib": "^1.3.0"
}
}

View File

@@ -20,12 +20,9 @@ Installation of the [npm package](https://npmjs.org/package/xo-cli):
> xo-cli --help
Usage:
xo-cli --register [--allowUnauthorized] [--expiresIn duration] <XO-Server URL> <username> [<password>]
xo-cli --register [--expiresIn duration] <XO-Server URL> <username> [<password>]
Registers the XO instance to use.
--allowUnauthorized, --au
Accept invalid certificate (e.g. self-signed).
--expiresIn duration
Can be used to change the validity duration of the
authorization token (default: one month).

View File

@@ -2,12 +2,9 @@
> xo-cli --help
Usage:
xo-cli --register [--allowUnauthorized] [--expiresIn duration] <XO-Server URL> <username> [<password>]
xo-cli --register [--expiresIn duration] <XO-Server URL> <username> [<password>]
Registers the XO instance to use.
--allowUnauthorized, --au
Accept invalid certificate (e.g. self-signed).
--expiresIn duration
Can be used to change the validity duration of the
authorization token (default: one month).

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-cli",
"version": "0.12.0",
"version": "0.11.1",
"license": "AGPL-3.0-or-later",
"description": "Basic CLI for Xen-Orchestra",
"keywords": [
@@ -34,7 +34,6 @@
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-extra": "^10.0.0",
"getopts": "^2.3.0",
"http-request-plus": "^0.13.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
@@ -48,7 +47,7 @@
"pw": "^0.0.4",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1"
"xo-lib": "^0.10.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -14,7 +14,6 @@ const chalk = require('chalk')
const forEach = require('lodash/forEach')
const fromCallback = require('promise-toolbox/fromCallback')
const getKeys = require('lodash/keys')
const getopts = require('getopts')
const hrp = require('http-request-plus')
const humanFormat = require('human-format')
const identity = require('lodash/identity')
@@ -36,7 +35,7 @@ const config = require('./config')
// ===================================================================
async function connect() {
const { allowUnauthorized, server, token } = await config.load()
const { server, token } = await config.load()
if (server === undefined) {
throw new Error('no server to connect to!')
}
@@ -45,7 +44,7 @@ async function connect() {
throw new Error('no token available')
}
const xo = new Xo({ rejectUnauthorized: !allowUnauthorized, url: server })
const xo = new Xo({ url: server })
await xo.open()
await xo.signIn({ token })
return xo
@@ -151,12 +150,9 @@ const help = wrap(
`
Usage:
$name --register [--allowUnauthorized] [--expiresIn duration] <XO-Server URL> <username> [<password>]
$name --register [--expiresIn duration] <XO-Server URL> <username> [<password>]
Registers the XO instance to use.
--allowUnauthorized, --au
Accept invalid certificate (e.g. self-signed).
--expiresIn duration
Can be used to change the validity duration of the
authorization token (default: one month).
@@ -235,35 +231,29 @@ exports = module.exports = main
exports.help = help
async function register(args) {
const {
allowUnauthorized,
expiresIn,
_: [
url,
email,
password = await new Promise(function (resolve) {
process.stdout.write('Password: ')
pw(resolve)
}),
],
} = getopts(args, {
alias: {
allowUnauthorized: 'au',
},
boolean: ['allowUnauthorized'],
stopEarly: true,
string: ['expiresIn'],
})
let expiresIn
if (args[0] === '--expiresIn') {
expiresIn = args[1]
args = args.slice(2)
}
const xo = new Xo({ rejectUnauthorized: !allowUnauthorized, url })
const [
url,
email,
password = await new Promise(function (resolve) {
process.stdout.write('Password: ')
pw(resolve)
}),
] = args
const xo = new Xo({ url })
await xo.open()
await xo.signIn({ email, password })
console.log('Successfully logged with', xo.user.email)
await config.set({
allowUnauthorized,
server: url,
token: await xo.call('token.create', { expiresIn: expiresIn === '' ? undefined : expiresIn }),
token: await xo.call('token.create', { expiresIn }),
})
}
exports.register = register
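
For reference, a small sketch of what the `getopts` call above yields (argument values are illustrative):

```js
const getopts = require('getopts')

const opts = getopts(['--au', '--expiresIn', '1 week', 'https://xo.example.org', 'admin'], {
  alias: { allowUnauthorized: 'au' },
  boolean: ['allowUnauthorized'],
  stopEarly: true,
  string: ['expiresIn'],
})
// → { _: ['https://xo.example.org', 'admin'],
//     allowUnauthorized: true, au: true, expiresIn: '1 week' }
```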

View File

@@ -37,7 +37,7 @@
"csv-parser": "^3.0.0",
"exec-promise": "^0.7.0",
"through2": "^4.0.2",
"xo-lib": "^0.11.1"
"xo-lib": "^0.10.1"
},
"devDependencies": {
"@types/node": "^16.11.6",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-lib",
"version": "0.11.1",
"version": "0.10.1",
"license": "AGPL-3.0-or-later",
"description": "Library to connect to XO-Server",
"keywords": [
@@ -26,10 +26,10 @@
"> 2%"
],
"engines": {
"node": ">=10"
"node": ">=6"
},
"dependencies": {
"jsonrpc-websocket-client": "^0.7.2",
"jsonrpc-websocket-client": "^0.6.0",
"lodash": "^4.17.2",
"make-error": "^1.0.4"
},

View File

@@ -13,9 +13,8 @@ export class XoError extends BaseError {}
// -------------------------------------------------------------------
export default class Xo extends JsonRpcWebSocketClient {
constructor({ credentials, url = '.', ...opts } = {}) {
opts.url = `${trimEnd(url, '/')}/api/`
super(opts)
constructor({ credentials, url = '.' } = {}) {
super(`${trimEnd(url, '/')}/api/`)
this._credentials = credentials
this._user = null
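
A hedged example of the widened constructor: extra options such as `rejectUnauthorized` now flow through to `JsonRpcWebSocketClient`, which is how xo-cli's `--allowUnauthorized` flag above is implemented:

```js
// accept a self-signed certificate when connecting (illustrative values)
const xo = new Xo({ url: 'https://xo.example.org', rejectUnauthorized: false })
await xo.open()
await xo.signIn({ token: process.env.XO_TOKEN }) // token auth, as used by xo-cli
```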

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-remote-parser",
"version": "0.8.0",
"version": "0.7.0",
"license": "AGPL-3.0-or-later",
"description": "Parse and format XO remote URLs",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-remote-parser",

View File

@@ -2,7 +2,7 @@ import filter from 'lodash/filter'
import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
import urlParser from 'url-parse'
import Url from 'url-parse'
const NFS_RE = /^([^:]+):(?:(\d+):)?([^:]+)$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0]+)(?:\0(.*))?$/
@@ -38,7 +38,7 @@ export const parse = string => {
object.username = username
object.password = password
} else if (type === 's3' || type === 's3+http') {
const parsed = urlParser(string, true)
const parsed = new Url(string)
object.protocol = parsed.protocol === 's3:' ? 'https' : 'http'
object.type = 's3'
object.region = parsed.hash.length === 0 ? undefined : parsed.hash.slice(1) // remove '#'
@@ -46,24 +46,11 @@ export const parse = string => {
object.path = parsed.pathname
object.username = parsed.username
object.password = decodeURIComponent(parsed.password)
const qs = parsed.query
object.allowUnauthorized = qs.allowUnauthorized === 'true'
}
return object
}
export const format = ({
type,
host,
path,
port,
username,
password,
domain,
protocol = type,
region,
allowUnauthorized = false,
}) => {
export const format = ({ type, host, path, port, username, password, domain, protocol = type, region }) => {
type === 'local' && (type = 'file')
let string = `${type}://`
if (type === 'nfs') {
@@ -84,10 +71,6 @@ export const format = ({
path = `/${path}`
}
string += path
if (type === 's3' && allowUnauthorized === true) {
string += `?allowUnauthorized=true`
}
if (type === 's3' && region !== undefined) {
string += `#${region}`
}
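
A round-trip sketch of the new `allowUnauthorized` handling; the URL is taken from the test fixtures below:

```js
import { format, parse } from 'xo-remote-parser'

const remote = parse('s3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir?allowUnauthorized=true')
// remote.type === 's3', remote.protocol === 'https', remote.allowUnauthorized === true

format(remote)
// appends '?allowUnauthorized=true' after the path, before the optional '#region'
```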

View File

@@ -54,20 +54,6 @@ const data = deepFreeze({
username: 'AKIAS',
password: 'XSuBupZ0mJlu+',
region: undefined,
allowUnauthorized: false,
},
},
's3 accepting self signed ': {
string: 's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir?allowUnauthorized=true',
object: {
type: 's3',
protocol: 'https',
host: 's3-us-west-2.amazonaws.com',
path: '/test-bucket/dir',
username: 'AKIAS',
password: 'XSuBupZ0mJlu+',
region: undefined,
allowUnauthorized: true,
},
},
})
@@ -111,33 +97,6 @@ const parseData = deepFreeze({
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: false,
},
},
'S3 accepting self signed certificate': {
string: 's3+http://Administrator:password@192.168.100.225/bucket/dir?allowUnauthorized=true#reg1',
object: {
type: 's3',
host: '192.168.100.225',
protocol: 'http',
path: '/bucket/dir',
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: true,
},
'S3 with broken allowUnauthorized': {
string: 's3+http://Administrator:password@192.168.100.225/bucket/dir?allowUnauthorized=notTrue#reg1',
object: {
type: 's3',
host: '192.168.100.225',
protocol: 'http',
path: '/bucket/dir',
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: false,
},
},
},
})
@@ -152,19 +111,6 @@ const formatData = deepFreeze({
path: '/var/lib/xoa/backup',
},
},
'S3 with broken allowUnauthorized': {
string: 's3+http://Administrator:password@192.168.100.225/bucket/dir#reg1',
object: {
type: 's3',
host: '192.168.100.225',
protocol: 'http',
path: '/bucket/dir',
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: 'notTrue',
},
},
})
// -------------------------------------------------------------------

View File

@@ -36,7 +36,7 @@
"promise-toolbox": "^0.20.0",
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",
"xo-lib": "^0.11.1"
"xo-lib": "^0.10.1"
},
"scripts": {
"dev-test": "jest --bail --watch",

View File

@@ -32,7 +32,7 @@
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.3.0",
"csv-stringify": "^6.0.0",
"csv-stringify": "^5.5.0",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"human-format": "^0.11.0",

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.84.1",
"version": "5.83.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -35,17 +35,17 @@
"@vates/parse-duration": "^0.1.1",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.0",
"@xen-orchestra/backups": "^0.15.1",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^0.8.4",
"@xen-orchestra/xapi": "^0.8.0",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.0.1",
@@ -54,7 +54,6 @@
"blocked-at": "^1.2.0",
"bluebird": "^3.5.1",
"body-parser": "^1.18.2",
"complex-matcher": "^0.7.0",
"compression": "^1.7.3",
"connect-flash": "^0.1.1",
"content-type": "^1.0.4",
@@ -123,14 +122,14 @@
"unzipper": "^0.10.5",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^2.0.1",
"vhd-lib": "^1.3.0",
"ws": "^8.2.3",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.35.1",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",
"xo-remote-parser": "^0.8.0",
"xo-remote-parser": "^0.7.0",
"xo-vmdk-to-vhd": "^2.0.0"
},
"devDependencies": {

View File

@@ -4,13 +4,7 @@ export function getAll() {
getAll.description = 'Gets all existing cloud configs templates'
export function getAllNetworkConfigs() {
return this.getAllNetworkConfigs()
}
getAllNetworkConfigs.description = 'Gets all existing network config templates'
export async function create(props) {
export function create(props) {
return this.createCloudConfig(props)
}
@@ -21,17 +15,6 @@ create.params = {
template: { type: 'string' },
}
export function createNetworkConfig(props) {
return this.createCloudConfig({ ...props, type: 'network' })
}
createNetworkConfig.permission = 'admin'
createNetworkConfig.description = 'Creates a new network config template'
createNetworkConfig.params = {
name: { type: 'string' },
template: { type: 'string' },
}
export function update(props) {
return this.updateCloudConfig(props)
}

View File

@@ -1,5 +1,4 @@
import { format } from 'json-rpc-peer'
import { Ref } from 'xen-api'
// ===================================================================
@@ -10,7 +9,6 @@ export async function set({
name_label: nameLabel,
backupNetwork,
migrationNetwork,
suspendSr,
}) {
pool = this.getXapiObject(pool)
@@ -19,7 +17,6 @@ export async function set({
nameLabel !== undefined && pool.set_name_label(nameLabel),
migrationNetwork !== undefined && pool.update_other_config('xo:migrationNetwork', migrationNetwork),
backupNetwork !== undefined && pool.update_other_config('xo:backupNetwork', backupNetwork),
suspendSr !== undefined && pool.$call('set_suspend_image_SR', suspendSr === null ? Ref.EMPTY : suspendSr._xapiRef),
])
}
@@ -43,15 +40,10 @@ set.params = {
type: ['string', 'null'],
optional: true,
},
suspendSr: {
type: ['string', 'null'],
optional: true,
},
}
set.resolve = {
pool: ['id', 'pool', 'administrate'],
suspendSr: ['suspendSr', 'SR', 'administrate'],
}
// -------------------------------------------------------------------
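
A hedged client-side sketch of the new nullable `suspendSr` parameter; `xo`, `poolId` and `srId` are placeholders for an xo-lib client and object ids:

```js
await xo.call('pool.set', { id: poolId, suspendSr: srId }) // set the suspend image SR
await xo.call('pool.set', { id: poolId, suspendSr: null }) // unset it (maps to Ref.EMPTY)
```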

View File

@@ -9,7 +9,6 @@ import { FAIL_ON_QUEUE } from 'limit-concurrency-decorator'
import { format } from 'json-rpc-peer'
import { ignoreErrors } from 'promise-toolbox'
import { invalidParameters, noSuchObject, operationFailed, unauthorized } from 'xo-common/api-errors.js'
import { Ref } from 'xen-api'
import { forEach, map, mapFilter, parseSize, safeDateFormat } from '../utils.mjs'
@@ -535,11 +534,6 @@ export const set = defer(async function ($defer, params) {
await this.shareVmResourceSet(vmId)
}
const suspendSr = extract(params, 'suspendSr')
if (suspendSr !== undefined) {
await xapi.call('VM.set_suspend_SR', VM._xapiRef, suspendSr === null ? Ref.EMPTY : suspendSr._xapiRef)
}
return xapi.editVm(vmId, params, async (limits, vm) => {
const resourceSet = xapi.xo.getData(vm, 'resourceSet')
@@ -638,13 +632,10 @@ set.params = {
virtualizationMode: { type: 'string', optional: true },
blockedOperations: { type: 'object', optional: true },
suspendSr: { type: ['string', 'null'], optional: true },
}
set.resolve = {
VM: ['id', ['VM', 'VM-snapshot', 'VM-template'], 'administrate'],
suspendSr: ['suspendSr', 'SR', 'administrate'],
}
// -------------------------------------------------------------------

View File

@@ -1,4 +1,3 @@
import * as CM from 'complex-matcher'
import getStream from 'get-stream'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
@@ -53,10 +52,6 @@ function handleGetAllObjects(req, res, { filter, limit }) {
}
export function getAllObjects({ filter, limit, ndjson = false }) {
if (typeof filter === 'string') {
filter = CM.parse(filter).createPredicate()
}
return ndjson
? this.registerHttpRequest(handleGetAllObjects, {
filter,
@@ -68,7 +63,7 @@ export function getAllObjects({ filter, limit, ndjson = false }) {
getAllObjects.description = 'Returns all XO objects'
getAllObjects.params = {
filter: { type: ['object', 'string'], optional: true },
filter: { type: 'object', optional: true },
limit: { type: 'number', optional: true },
ndjson: { type: 'boolean', optional: true },
}

View File

@@ -471,7 +471,6 @@ const TRANSFORMS = {
vm.snapshot_time = toTimestamp(obj.snapshot_time)
vm.$snapshot_of = link(obj, 'snapshot_of')
vm.suspendVdi = link(obj, 'suspend_VDI')
} else if (obj.is_a_template) {
const defaultTemplate = isDefaultTemplate(obj)
vm.type += '-template'

View File

@@ -161,15 +161,7 @@ export default class Xapi extends XapiBase {
// =================================================================
async joinPool(masterAddress, masterUsername, masterPassword, force = false) {
try {
await this.call(force ? 'pool.join_force' : 'pool.join', masterAddress, masterUsername, masterPassword)
} catch (error) {
const params = error?.call?.params
if (Array.isArray(params)) {
params[2] = '* obfuscated *'
}
throw error
}
await this.call(force ? 'pool.join_force' : 'pool.join', masterAddress, masterUsername, masterPassword)
}
// =================================================================

View File

@@ -115,10 +115,7 @@ async function resolveParams(method, params) {
const permissions = []
forEach(resolve, ([param, types, permission = 'administrate'], key) => {
const id = params[param]
if (
id === undefined || // optional param not used
id === null // explicit value to unset
) {
if (id === undefined) {
return
}

View File

@@ -41,12 +41,8 @@ export default class {
return this._db.remove(id)
}
async getAllCloudConfigs() {
return (await this._db.get()).filter(({ type }) => type === undefined)
}
async getAllNetworkConfigs() {
return (await this._db.get()).filter(({ type }) => type === 'network')
getAllCloudConfigs() {
return this._db.get()
}
async getCloudConfig(id) {

View File

@@ -23,7 +23,7 @@ import { timeout } from 'promise-toolbox'
import Collection from '../collection/redis.mjs'
import patch from '../patch.mjs'
import { debounceWithKey, REMOVE_CACHE_ENTRY } from '../_pDebounceWithKey.mjs'
import { debounceWithKey } from '../_pDebounceWithKey.mjs'
import { extractIpFromVmNetworks } from '../_extractIpFromVmNetworks.mjs'
import { generateToken } from '../utils.mjs'
@@ -191,8 +191,6 @@ export default class Proxy {
await xapi.startVm(vmUuid)
}
this.getProxyApplianceUpdaterState(REMOVE_CACHE_ENTRY, id)
await xapi._waitObjectState(vmUuid, vm => extractIpFromVmNetworks(vm.$guest_metrics?.networks) !== undefined)
}

View File

@@ -25,7 +25,7 @@
"lodash": "^4.17.15",
"pako": "^2.0.4",
"promise-toolbox": "^0.20.0",
"vhd-lib": "^2.0.1",
"vhd-lib": "^1.3.0",
"xml2js": "^0.4.23"
},
"devDependencies": {
@@ -37,6 +37,7 @@
"execa": "^5.0.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"promise-toolbox": "^0.19.2",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
},

View File

@@ -35,7 +35,7 @@ export function parseU64b(buffer, offset, valueName) {
return value
}
export function unpackHeader(buffer) {
export function parseHeader(buffer) {
const magicString = buffer.slice(0, 4).toString('ascii')
if (magicString !== 'KDMV') {
throw new Error('not a VMDK file')

View File

@@ -1,7 +1,7 @@
import assert from 'assert'
import zlib from 'zlib'
import { compressionDeflate, unpackHeader, parseU64b } from './definitions'
import { compressionDeflate, parseHeader, parseU64b } from './definitions'
import { VirtualBuffer } from './virtual-buffer'
const SECTOR_SIZE = 512
@@ -132,7 +132,7 @@ export default class VMDKDirectParser {
if (version !== 1 && version !== 3) {
throw new Error('unsupported VMDK version ' + version + ', only version 1 and 3 are supported')
}
this.header = unpackHeader(headerBuffer)
this.header = parseHeader(headerBuffer)
// I think the multiplications are OK, because the descriptor is always at the beginning of the file
const descriptorLength = this.header.descriptorSizeSectors * SECTOR_SIZE
const descriptorBuffer = await this.virtualBuffer.readChunk(descriptorLength, 'descriptor')

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-web",
"version": "5.90.0",
"version": "5.89.0",
"license": "AGPL-3.0-or-later",
"description": "Web interface client for Xen-Orchestra",
"keywords": [
@@ -80,7 +80,7 @@
"index-modules": "^0.4.3",
"is-ip": "^3.1.0",
"js-cookie": "2.2.1",
"jsonrpc-websocket-client": "^0.7.2",
"jsonrpc-websocket-client": "^0.6.0",
"kindof": "^2.0.0",
"lodash": "^4.6.1",
"loose-envify": "^1.1.0",
@@ -136,8 +136,8 @@
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
"xo-common": "^0.7.0",
"xo-lib": "^0.11.1",
"xo-remote-parser": "^0.8.0",
"xo-lib": "^0.10.1",
"xo-remote-parser": "^0.7.0",
"xo-vmdk-to-vhd": "^2.0.0"
},
"scripts": {

View File

@@ -357,7 +357,6 @@ const messages = {
fillOptionalInformations: 'Fill information (optional)',
selectTableReset: 'Reset',
selectCloudConfigs: 'Select cloud config(s)…',
selectNetworkConfigs: 'Select network config(s)…',
// --- Dates/Scheduler ---
@@ -582,13 +581,10 @@ const messages = {
remoteSmbPlaceHolderDomain: 'Domain',
remoteSmbPlaceHolderAddressShare: '<address>\\\\<share>',
remoteSmbPlaceHolderOptions: 'Custom mount options',
remoteS3LabelUseHttps: 'Use HTTPS',
remoteS3LabelAllowInsecure: 'Allow unauthorized',
remoteS3PlaceHolderBucket: 'AWS S3 bucket name',
remoteS3PlaceHolderDirectory: 'Directory',
remoteS3Region: 'Region, leave blank for default',
remoteS3TooltipProtocol: 'Uncheck if you want HTTP instead of HTTPS',
remoteS3TooltipAcceptInsecure: 'Check if you want to accept self-signed certificates',
remoteS3TooltipProtocol: 'Check if you want HTTP instead of HTTPS',
remotePlaceHolderPassword: 'Password(fill to edit)',
// ------ New Storage -----
@@ -1204,7 +1200,6 @@ const messages = {
deleteSnapshots: 'Remove selected snapshots',
copySnapshot: 'Create a VM from this snapshot',
exportSnapshot: 'Export this snapshot',
exportSnapshotMemory: 'Export snapshot memory',
secureBoot: 'Secure boot',
snapshotDate: 'Creation date',
snapshotError: 'Snapshot error',
@@ -2127,14 +2122,9 @@ const messages = {
confirmDeleteCloudConfigsTitle: 'Delete cloud config{nCloudConfigs, plural, one {} other {s}}',
confirmDeleteCloudConfigsBody:
'Are you sure you want to delete {nCloudConfigs, number} cloud config{nCloudConfigs, plural, one {} other {s}}?',
confirmDeleteNetworkConfigsTitle: 'Delete network config{nNetworkConfigs, plural, one {} other {s}}',
confirmDeleteNetworkConfigsBody:
'Are you sure you want to delete {nNetworkConfigs, number} network config{nNetworkConfigs, plural, one {} other {s}}?',
deleteCloudConfig: 'Delete cloud config',
editCloudConfig: 'Edit cloud config',
deleteSelectedCloudConfigs: 'Delete selected cloud configs',
networkConfig: 'Network config',
cloudConfig: 'Cloud config',
// ----- Config -----
noConfigFile: 'No config file selected',

View File

@@ -45,7 +45,6 @@ import {
subscribeCurrentUser,
subscribeGroups,
subscribeIpPools,
subscribeNetworkConfigs,
subscribeProxies,
subscribeRemotes,
subscribeResourceSets,
@@ -1064,16 +1063,3 @@ export const SelectCloudConfig = makeSubscriptionSelect(
}),
{ placeholder: _('selectCloudConfigs') }
)
export const SelectNetworkConfig = makeSubscriptionSelect(
subscriber =>
subscribeNetworkConfigs(networkConfigs => {
subscriber({
xoObjects: map(sortBy(networkConfigs, 'name'), networkConfigs => ({
...networkConfigs,
type: 'cloudConfig',
})),
})
}),
{ placeholder: _('selectNetworkConfigs') }
)

View File

@@ -1410,9 +1410,6 @@ export const deleteSnapshots = vms =>
body: _('deleteSnapshotsModalMessage', { nVms: vms.length }),
}).then(() => Promise.all(map(vms, vm => _call('vm.delete', { id: resolveId(vm) }))), noop)
// checkpoint snapshot is in a Suspended state
export const isCheckpointSnapshot = ({ power_state }) => power_state === 'Suspended'
import MigrateVmModalBody from './migrate-vm-modal' // eslint-disable-line import/first
export const migrateVm = async (vm, host) => {
let params
@@ -1716,9 +1713,8 @@ export const exportVm = async vm => {
}
export const exportVdi = vdi => {
const id = resolveId(vdi)
info(_('startVdiExport'), id)
return _call('disk.exportContent', { id }).then(({ $getFrom: url }) => {
info(_('startVdiExport'), vdi.id)
return _call('disk.exportContent', { id: resolveId(vdi) }).then(({ $getFrom: url }) => {
window.open(`.${url}`)
})
}
@@ -2906,33 +2902,6 @@ export const deleteCloudConfigs = ids => {
export const editCloudConfig = (cloudConfig, props) =>
_call('cloudConfig.update', { ...props, id: resolveId(cloudConfig) })::tap(subscribeCloudConfigs.forceRefresh)
export const subscribeNetworkConfigs = createSubscription(() => _call('cloudConfig.getAllNetworkConfigs'))
export const createNetworkConfig = props =>
_call('cloudConfig.createNetworkConfig', props)::tap(subscribeNetworkConfigs.forceRefresh)
export const deleteNetworkConfigs = ids => {
const { length } = ids
if (length === 0) {
return
}
const vars = { nNetworkConfigs: length }
return confirm({
title: _('confirmDeleteNetworkConfigsTitle', vars),
body: <p>{_('confirmDeleteNetworkConfigsBody', vars)}</p>,
}).then(
() =>
Promise.all(ids.map(id => _call('cloudConfig.delete', { id: resolveId(id) })))::tap(
subscribeNetworkConfigs.forceRefresh
),
noop
)
}
export const editNetworkConfig = (networkConfig, props) =>
_call('cloudConfig.update', { ...props, id: resolveId(networkConfig) })::tap(subscribeNetworkConfigs.forceRefresh)
// XO SAN ----------------------------------------------------------------------
export const getVolumeInfo = (xosanSr, infoType) => _call('xosan.getVolumeInfo', { sr: xosanSr, infoType })

View File

@@ -76,7 +76,7 @@ export default class About extends Component {
</Row>
<Row>
<Col mediumSize={6}>
<a href='https://github.com/vatesfr/xen-orchestra/issues/new/choose'>
<a href='https://github.com/vatesfr/xen-orchestra/issues/new'>
<Icon icon='bug' size={4} />
<h4>{_('bugTracker')}</h4>
</a>

View File

@@ -102,12 +102,7 @@ const TaskError = ({ task }) => {
const [label, className] =
task.status === 'skipped' ? [_('taskReason'), 'text-info'] : [_('taskError'), 'text-danger']
return (
<div>
{_.keyValue(label, <span className={className}>{message}</span>)}
{task.result.name === 'XapiError' && <span className='d-block'>{_('logXapiError')}</span>}
</div>
)
return <div>{_.keyValue(label, <span className={className}>{message}</span>)}</div>
}
const Warnings = ({ warnings }) =>

View File

@@ -46,7 +46,6 @@ import {
SelectHost,
SelectIp,
SelectNetwork,
SelectNetworkConfig,
SelectPool,
SelectResourceSet,
SelectResourceSetIp,
@@ -1047,11 +1046,6 @@ export default class NewVm extends BaseComponent {
})
}
_onChangeNetworkConfig = networkConfig =>
this._setState({
networkConfig: get(() => networkConfig.template),
})
_renderInstallSettings = () => {
const { coreOsDefaultTemplateError } = this.state.state
const { template } = this.props
@@ -1141,13 +1135,16 @@ export default class NewVm extends BaseComponent {
&nbsp;
<AvailableTemplateVars />
&nbsp;
<span className={styles.inlineSelect}>
<SelectCloudConfig disabled={installMethod !== 'customConfig'} onChange={this._onChangeCloudConfig} />
</span>
</LineItem>
<br />
<LineItem>
<Item>
<label className='text-muted'>
{_('newVmUserConfigLabel')}
<br />
<SelectCloudConfig disabled={installMethod !== 'customConfig'} onChange={this._onChangeCloudConfig} />
<DebounceTextarea
className='form-control text-monospace'
disabled={installMethod !== 'customConfig'}
@@ -1162,10 +1159,6 @@ export default class NewVm extends BaseComponent {
<label className='text-muted'>
{_('newVmNetworkConfigLabel')} <NetworkConfigInfo />
<br />
<SelectNetworkConfig
disabled={installMethod !== 'customConfig'}
onChange={this._onChangeNetworkConfig}
/>
<DebounceTextarea
className='form-control text-monospace'
disabled={installMethod !== 'customConfig'}

View File

@@ -5,23 +5,14 @@ import defined from '@xen-orchestra/defined'
import React from 'react'
import SortedTable from 'sorted-table'
import { addSubscriptions } from 'utils'
import { AvailableTemplateVars, DEFAULT_CLOUD_CONFIG_TEMPLATE, DEFAULT_NETWORK_CONFIG_TEMPLATE } from 'cloud-config'
import { AvailableTemplateVars, DEFAULT_CLOUD_CONFIG_TEMPLATE } from 'cloud-config'
import { Container, Col } from 'grid'
import { find } from 'lodash'
import { generateId } from 'reaclette-utils'
import { injectState, provideState } from 'reaclette'
import { Text } from 'editable'
import { Textarea as DebounceTextarea } from 'debounce-input-decorator'
import {
createCloudConfig,
createNetworkConfig,
deleteCloudConfigs,
deleteNetworkConfigs,
editCloudConfig,
editNetworkConfig,
subscribeCloudConfigs,
subscribeNetworkConfigs,
} from 'xo'
import { createCloudConfig, deleteCloudConfigs, editCloudConfig, subscribeCloudConfigs } from 'xo'
// ===================================================================
@@ -41,7 +32,7 @@ const COLUMNS = [
const ACTIONS = [
{
handler: (ids, { type }) => (type === 'network' ? deleteNetworkConfigs(ids) : deleteCloudConfigs(ids)),
handler: deleteCloudConfigs,
icon: 'delete',
individualLabel: _('deleteCloudConfig'),
label: _('deleteSelectedCloudConfigs'),
@@ -61,16 +52,12 @@ const INDIVIDUAL_ACTIONS = [
const initialParams = {
cloudConfigToEditId: undefined,
name: '',
networkConfigToEditId: undefined,
networkConfigName: '',
networkConfigTemplate: undefined,
template: undefined,
}
export default decorate([
addSubscriptions({
cloudConfigs: subscribeCloudConfigs,
networkConfigs: subscribeNetworkConfigs,
}),
provideState({
initialState: () => initialParams,
@@ -83,15 +70,7 @@ export default decorate([
}),
reset: () => state => ({
...state,
cloudConfigToEditId: initialParams.cloudConfigToEditId,
name: initialParams.name,
template: initialParams.template,
}),
resetNetworkForm: () => state => ({
...state,
networkConfigToEditId: initialParams.networkConfigToEditId,
networkConfigName: initialParams.networkConfigName,
networkConfigTemplate: initialParams.networkConfigTemplate,
...initialParams,
}),
createCloudConfig:
({ reset }) =>
@@ -99,12 +78,6 @@ export default decorate([
await createCloudConfig({ name, template })
reset()
},
createNetworkConfig:
({ resetNetworkForm }) =>
async ({ networkConfigName, networkConfigTemplate = DEFAULT_NETWORK_CONFIG_TEMPLATE }) => {
await createNetworkConfig({ name: networkConfigName, template: networkConfigTemplate })
resetNetworkForm()
},
editCloudConfig:
({ reset }) =>
async ({ name, template, cloudConfigToEditId }, { cloudConfigs }) => {
@@ -114,23 +87,6 @@ export default decorate([
}
reset()
},
editNetworkConfig:
({ resetNetworkForm }) =>
async ({ networkConfigName, networkConfigTemplate, networkConfigToEditId }, { networkConfigs }) => {
const oldNetworkConfig = find(networkConfigs, { id: networkConfigToEditId })
if (oldNetworkConfig.name !== networkConfigName || oldNetworkConfig.template !== networkConfigTemplate) {
await editNetworkConfig(networkConfigToEditId, { name: networkConfigName, template: networkConfigTemplate })
}
resetNetworkForm()
},
populateNetworkForm:
(_, { id, name, template }) =>
state => ({
...state,
networkConfigName: name,
networkConfigToEditId: id,
networkConfigTemplate: template,
}),
populateForm:
(_, { id, name, template }) =>
state => ({
@@ -145,147 +101,76 @@ export default decorate([
inputNameId: generateId,
inputTemplateId: generateId,
isInvalid: ({ name, template }) => name.trim() === '' || (template !== undefined && template.trim() === ''),
isNetworkInvalid: props =>
props.networkConfigName.trim() === '' ||
(props.networkConfigTemplate !== undefined && props.networkConfigTemplate.trim() === ''),
},
}),
injectState,
({ cloudConfigs, effects, networkConfigs, state }) => (
<div>
<Container>
<Col mediumSize={6}>
<h2>{_('cloudConfig')}</h2>
<form id={state.formId}>
<div className='form-group'>
<label htmlFor={state.inputNameId}>
<strong>{_('formName')}</strong>{' '}
</label>
<input
className='form-control'
id={state.inputNameId}
name='name'
onChange={effects.setInputValue}
type='text'
value={state.name}
/>
</div>{' '}
<div className='form-group'>
<label htmlFor={state.inputTemplateId}>
<strong>{_('settingsCloudConfigTemplate')}</strong>{' '}
</label>{' '}
<AvailableTemplateVars />
<DebounceTextarea
className='form-control text-monospace'
id={state.inputTemplateId}
name='template'
onChange={effects.setInputValue}
rows={12}
value={defined(state.template, DEFAULT_CLOUD_CONFIG_TEMPLATE)}
/>
</div>{' '}
{state.cloudConfigToEditId !== undefined ? (
<ActionButton
btnStyle='primary'
disabled={state.isInvalid}
form={state.formId}
handler={effects.editCloudConfig}
icon='edit'
>
{_('formEdit')}
</ActionButton>
) : (
<ActionButton
btnStyle='success'
disabled={state.isInvalid}
form={state.formId}
handler={effects.createCloudConfig}
icon='add'
>
{_('formCreate')}
</ActionButton>
)}
<ActionButton className='pull-right' handler={effects.reset} icon='cancel'>
{_('formCancel')}
({ state, effects, cloudConfigs }) => (
<Container>
<Col mediumSize={6}>
<form id={state.formId}>
<div className='form-group'>
<label htmlFor={state.inputNameId}>
<strong>{_('formName')}</strong>{' '}
</label>
<input
className='form-control'
id={state.inputNameId}
name='name'
onChange={effects.setInputValue}
type='text'
value={state.name}
/>
</div>{' '}
<div className='form-group'>
<label htmlFor={state.inputTemplateId}>
<strong>{_('settingsCloudConfigTemplate')}</strong>{' '}
</label>{' '}
<AvailableTemplateVars />
<DebounceTextarea
className='form-control text-monospace'
id={state.inputTemplateId}
name='template'
onChange={effects.setInputValue}
rows={12}
value={defined(state.template, DEFAULT_CLOUD_CONFIG_TEMPLATE)}
/>
</div>{' '}
{state.cloudConfigToEditId !== undefined ? (
<ActionButton
btnStyle='primary'
disabled={state.isInvalid}
form={state.formId}
handler={effects.editCloudConfig}
icon='edit'
>
{_('formEdit')}
</ActionButton>
</form>
</Col>
<Col mediumSize={6}>
<SortedTable
actions={ACTIONS}
collection={cloudConfigs}
columns={COLUMNS}
data-populateForm={effects.populateForm}
individualActions={INDIVIDUAL_ACTIONS}
stateUrlParam='s'
/>
</Col>
</Container>
<Container className='mt-2'>
<Col mediumSize={6}>
<h2>{_('networkConfig')}</h2>
<form>
<div className='form-group'>
<label>
<strong>{_('formName')}</strong>
</label>
<input
className='form-control'
name='networkConfigName'
onChange={effects.setInputValue}
type='text'
value={state.networkConfigName}
/>
</div>
<div className='form-group'>
<label htmlFor={state.inputTemplateId}>
<strong>{_('settingsCloudConfigTemplate')}</strong>
</label>
<DebounceTextarea
className='form-control text-monospace'
id={state.inputTemplateId}
name='networkConfigTemplate'
onChange={effects.setInputValue}
rows={12}
value={defined(state.networkConfigTemplate, DEFAULT_NETWORK_CONFIG_TEMPLATE)}
/>
</div>
{state.networkConfigToEditId !== undefined ? (
<ActionButton
btnStyle='primary'
disabled={state.isNetworkInvalid}
handler={effects.editNetworkConfig}
icon='edit'
>
{_('formEdit')}
</ActionButton>
) : (
<ActionButton
btnStyle='success'
disabled={state.isNetworkInvalid}
handler={effects.createNetworkConfig}
icon='add'
>
{_('formCreate')}
</ActionButton>
)}
<ActionButton className='pull-right' handler={effects.resetNetworkForm} icon='cancel'>
{_('formCancel')}
) : (
<ActionButton
btnStyle='success'
disabled={state.isInvalid}
form={state.formId}
handler={effects.createCloudConfig}
icon='add'
>
{_('formCreate')}
</ActionButton>
</form>
</Col>
<Col mediumSize={6}>
<SortedTable
actions={ACTIONS}
collection={networkConfigs}
columns={COLUMNS}
data-populateForm={effects.populateNetworkForm}
data-type='network'
individualActions={INDIVIDUAL_ACTIONS}
stateUrlParam='n'
/>
</Col>
</Container>
</div>
)}
<ActionButton className='pull-right' handler={effects.reset} icon='cancel'>
{_('formCancel')}
</ActionButton>
</form>
</Col>
<Col mediumSize={6}>
<SortedTable
actions={ACTIONS}
collection={cloudConfigs}
columns={COLUMNS}
data-populateForm={effects.populateForm}
individualActions={INDIVIDUAL_ACTIONS}
stateUrlParam='s'
/>
</Col>
</Container>
),
])

View File

@@ -249,14 +249,6 @@ const COLUMNS_SMB_REMOTE = [
const COLUMNS_S3_REMOTE = [
COLUMN_NAME,
{
itemRenderer: remote => remote.protocol === 'https' && <Icon icon='success' />,
name: <span>{_('remoteS3LabelUseHttps')} </span>,
},
{
itemRenderer: remote => remote.allowUnauthorized && <Icon icon='success' />,
name: <span>{_('remoteS3LabelAllowInsecure')} </span>,
},
{
itemRenderer: (remote, { formatMessage }) => (
<Text

View File

@@ -42,7 +42,6 @@ export default decorate([
bucket: undefined,
protocol: undefined,
region: undefined,
allowUnauthorized: undefined,
}),
effects: {
linkState,
@@ -68,7 +67,6 @@ export default decorate([
username = remote.username,
protocol = remote.protocol || 'https',
region = remote.region,
allowUnauthorized = remote.allowUnauthorized,
} = state
let { path = remote.path } = state
if (type === 's3') {
@@ -87,7 +85,6 @@ export default decorate([
username,
protocol,
region,
allowUnauthorized,
}),
options: options !== '' ? options : null,
proxy: proxyId,
@@ -148,10 +145,7 @@ export default decorate([
this.state.password = value
},
setInsecure(_, value) {
this.state.protocol = value ? 'https' : 'http'
},
setAllowUnauthorized(_, value) {
this.state.allowUnauthorized = value
this.state.protocol = value ? 'http' : 'https'
},
},
computed: {
@@ -181,7 +175,6 @@ export default decorate([
proxyId = remote.proxy,
type = remote.type || 'nfs',
username = remote.username || '',
allowUnauthorized = remote.allowUnauthorized || false,
} = state
return (
<div>
@@ -350,35 +343,11 @@ export default decorate([
{type === 's3' && (
<fieldset className='form-group form-group'>
<div className='input-group form-group'>
<span className='align-middle'>
{_('remoteS3LabelUseHttps')}{' '}
<Tooltip content={_('remoteS3TooltipProtocol')}>
<Icon icon='info' size='lg' />
<span className='input-group-addon'>
<Tooltip content={formatMessage(messages.remoteS3TooltipProtocol)}>
<Toggle iconSize={1} onChange={effects.setInsecure} value={protocol === 'http'} />
</Tooltip>
</span>
<Toggle
className='align-middle pull-right'
onChange={effects.setInsecure}
value={protocol === 'https'}
/>
</div>
<div className='input-group form-group'>
<span className='align-middle '>
{_('remoteS3LabelAllowInsecure')}{' '}
<Tooltip content={_('remoteS3TooltipAcceptInsecure')}>
<Icon icon='info' size='lg' />
</Tooltip>
</span>
<Toggle
className='align-middle pull-right'
disabled={protocol !== 'https'}
onChange={effects.setAllowUnauthorized}
value={allowUnauthorized}
/>
</div>
<div className='input-group form-group'>
<input
className='form-control'
name='host'

View File

@@ -16,10 +16,8 @@ import {
copyVm,
deleteSnapshot,
deleteSnapshots,
exportVdi,
exportVm,
editVm,
isCheckpointSnapshot,
revertSnapshot,
snapshotVm,
} from 'xo'
@@ -58,7 +56,8 @@ const COLUMNS = [
itemRenderer: snapshot => (
<div>
<Text onChange={value => editVm(snapshot, { name_label: value })} value={snapshot.name_label} />{' '}
{isCheckpointSnapshot(snapshot) && (
{/* checkpoint snapshots are in a Suspended state */}
{snapshot.power_state === 'Suspended' && (
<Tooltip content={_('snapshotMemorySaved')}>
<Icon icon='memory' color='text-success' />
</Tooltip>
@@ -98,13 +97,6 @@ const INDIVIDUAL_ACTIONS = [
icon: 'export',
label: _('exportSnapshot'),
},
{
collapsed: true,
disabled: snapshot => !isCheckpointSnapshot(snapshot),
handler: ({ suspendVdi }) => exportVdi(suspendVdi),
icon: 'memory',
label: _('exportSnapshotMemory'),
},
{
collapsed: true,
handler: copyToTemplate,

543
yarn.lock

File diff suppressed because it is too large