Compare commits

1 commit: `check_vhd_...` → `self-signe...`

| Author | SHA1 | Date |
|---|---|---|
|  | 5eb0055ebb |  |

.gitignore (vendored, 2 changes)
@@ -10,6 +10,8 @@
/packages/*/dist/
/packages/*/node_modules/

/packages/vhd-cli/src/commands/index.js

/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat
@@ -9,7 +9,7 @@ exports.EventListenersManager = class EventListenersManager {
  }

  add(type, listener) {
    let listeners = this._listeners.get(type)
    let listeners = this._listeners[type]
    if (listeners === undefined) {
      listeners = new Set()
      this._listeners.set(type, listeners)
@@ -1,67 +0,0 @@
'use strict'

const t = require('tap')
const { EventEmitter } = require('events')

const { EventListenersManager } = require('./')

const noop = Function.prototype

// function spy (impl = Function.prototype) {
//   function spy() {
//     spy.calls.push([Array.from(arguments), this])
//   }
//   spy.calls = []
//   return spy
// }

function assertListeners(t, event, listeners) {
  t.strictSame(t.context.ee.listeners(event), listeners)
}

t.beforeEach(function (t) {
  t.context.ee = new EventEmitter()
  t.context.em = new EventListenersManager(t.context.ee)
})

t.test('.add adds a listener', function (t) {
  t.context.em.add('foo', noop)

  assertListeners(t, 'foo', [noop])

  t.end()
})

t.test('.add does not add a duplicate listener', function (t) {
  t.context.em.add('foo', noop).add('foo', noop)

  assertListeners(t, 'foo', [noop])

  t.end()
})

t.test('.remove removes a listener', function (t) {
  t.context.em.add('foo', noop).remove('foo', noop)

  assertListeners(t, 'foo', [])

  t.end()
})

t.test('.removeAll removes all listeners of a given type', function (t) {
  t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')

  assertListeners(t, 'foo', [])
  assertListeners(t, 'bar', [noop])

  t.end()
})

t.test('.removeAll removes all listeners', function (t) {
  t.context.em.add('foo', noop).add('bar', noop).removeAll()

  assertListeners(t, 'foo', [])
  assertListeners(t, 'bar', [])

  t.end()
})
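For reference, a minimal sketch of the API exercised by the deleted tests above; only `add`, `remove` and `removeAll` (chainable, duplicate-free thanks to the `Set` visible in the `add()` hunk) come from this diff, the surrounding setup is assumed:

```js
const { EventEmitter } = require('events')
const { EventListenersManager } = require('@vates/event-listeners-manager')

const ee = new EventEmitter()
const em = new EventListenersManager(ee)
const log = data => console.log('got', data)

// add() is chainable and ignores duplicates (listeners are kept in a Set)
em.add('data', log).add('data', log)
ee.emit('data', 1) // log runs once

em.remove('data', log) // detach a single listener
em.add('foo', log).add('bar', log).removeAll('foo') // or a whole type
em.removeAll() // or everything
```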
@@ -35,12 +35,8 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "1.0.1",
  "version": "1.0.0",
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "tap --branches=72"
  },
  "devDependencies": {
    "tap": "^16.2.0"
    "postversion": "npm publish --access public"
  }
}
@@ -1,9 +1,6 @@
### `readChunk(stream, [size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended

```js
import { readChunk } from '@vates/read-chunk'
@@ -14,13 +11,3 @@ import { readChunk } from '@vates/read-chunk'
  }
})()
```

### `readChunkStrict(stream, [size])`

Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.

```js
import { readChunkStrict } from '@vates/read-chunk'

const chunk = await readChunkStrict(stream, 1024)
```
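To make the contrast between the two helpers concrete, here is a hedged sketch (the stream may be any Node readable; `error.chunk` comes from the implementation shown further down in this diff):

```js
import { readChunk, readChunkStrict } from '@vates/read-chunk'

async function example(stream) {
  // lenient: null means the stream ended before any data was read
  const maybeChunk = await readChunk(stream, 1024)

  // strict: a short read becomes an error carrying the partial data
  try {
    const chunk = await readChunkStrict(stream, 1024)
  } catch (error) {
    console.error(error.message, error.chunk) // e.g. 'stream has ended with not enough data'
  }
}
```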
@@ -16,12 +16,9 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):

## Usage

### `readChunk(stream, [size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended

```js
import { readChunk } from '@vates/read-chunk'
@@ -33,16 +30,6 @@ import { readChunk } from '@vates/read-chunk'
})()
```

### `readChunkStrict(stream, [size])`

Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.

```js
import { readChunkStrict } from '@vates/read-chunk'

const chunk = await readChunkStrict(stream, 1024)
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
@@ -30,22 +30,3 @@ const readChunk = (stream, size) =>
    onReadable()
  })
exports.readChunk = readChunk

exports.readChunkStrict = async function readChunkStrict(stream, size) {
  const chunk = await readChunk(stream, size)
  if (chunk === null) {
    throw new Error('stream has ended without data')
  }

  if (size !== undefined && chunk.length !== size) {
    const error = new Error('stream has ended with not enough data')
    Object.defineProperties(error, {
      chunk: {
        value: chunk,
      },
    })
    throw error
  }

  return chunk
}
@@ -4,7 +4,7 @@

const { Readable } = require('stream')

const { readChunk, readChunkStrict } = require('./')
const { readChunk } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
@@ -43,27 +43,3 @@ describe('readChunk', () => {
    })
  })
})

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

describe('readChunkStrict', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream([])))
    expect(error).toBeInstanceOf(Error)
    expect(error.message).toBe('stream has ended without data')
    expect(error.chunk).toEqual(undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    expect(error).toBeInstanceOf(Error)
    expect(error.message).toBe('stream has ended with not enough data')
    expect(error.chunk).toEqual(Buffer.from('foobar'))
  })
})
@@ -19,7 +19,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "1.0.0",
  "version": "0.1.2",
  "engines": {
    "node": ">=8.10"
  },
@@ -7,8 +7,8 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.26.0",
    "@xen-orchestra/fs": "^1.1.0",
    "@xen-orchestra/backups": "^0.23.0",
    "@xen-orchestra/fs": "^1.0.1",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.7.4",
  "version": "0.7.1",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
@@ -15,7 +15,7 @@ const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')

@@ -47,12 +47,13 @@ const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/

async function addDirectory(files, realPath, metadataPath) {
  const stats = await lstat(realPath)
  if (stats.isDirectory()) {
    await asyncMap(await readdir(realPath), file =>
      addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
    )
  } else if (stats.isFile()) {
  try {
    const subFiles = await readdir(realPath)
    await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
  } catch (error) {
    if (error == null || error.code !== 'ENOTDIR') {
      throw error
    }
    files.push({
      realPath,
      metadataPath,

@@ -291,7 +292,7 @@ class RemoteAdapter {
  }

  #useVhdDirectory() {
    return this.handler.useVhdDirectory()
    return this.handler.type === 's3'
  }

  #useAlias() {
@@ -382,12 +383,8 @@ class RemoteAdapter {
    const entriesMap = {}
    await asyncMap(await readdir(path), async name => {
      try {
        const stats = await lstat(`${path}/${name}`)
        if (stats.isDirectory()) {
          entriesMap[name + '/'] = {}
        } else if (stats.isFile()) {
          entriesMap[name] = {}
        }
        const stats = await stat(`${path}/${name}`)
        entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
      } catch (error) {
        if (error == null || error.code !== 'ENOENT') {
          throw error
@@ -153,13 +153,6 @@ class VmBackup {
        errors.push(error)
        this.delete(writer)
        warn(warnMessage, { error, writer: writer.constructor.name })

        // these two steps are the only ones that are not already in their own sub tasks
        if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }
      }
    })
    if (writers.size === 0) {
@@ -47,32 +47,42 @@ const computeVhdsSize = (handler, vhdPaths) =>
// |       |
// \___________rename_____________/

async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
  assert(chain.length >= 2)
  const chainCopy = [...chain]
  const parent = chainCopy.pop()
  const children = chainCopy

  if (merge) {
    logInfo(`merging children into parent`, { childrenCount: children.length, parent })
    onLog(`merging ${children.length} children into ${parent}`)

    let done, total
    const handle = setInterval(() => {
      if (done !== undefined) {
        logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total })
        onLog(`merging ${children.join(',')} into ${parent}: ${done}/${total}`)
      }
    }, 10e3)

    const mergedSize = await mergeVhd(handler, parent, handler, children, {
      logInfo,
      onProgress({ done: d, total: t }) {
        done = d
        total = t
      },
      remove,
    })

    clearInterval(handle)
    const mergeTargetChild = children.shift()
    await Promise.all([
      VhdAbstract.rename(handler, parent, mergeTargetChild),
      asyncMap(children, child => {
        onLog(`the VHD ${child} is already merged`)
        if (remove) {
          onLog(`deleting merged VHD ${child}`)
          return VhdAbstract.unlink(handler, child)
        }
      }),
    ])

    return mergedSize
  }
}
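The hunk above replaces the single `onLog(message)` callback with a structured `logInfo(message, data)` / `logWarn(message, data)` pair, moving the variable parts out of the message string. A small sketch of the new shape (the console-backed loggers are assumptions, not the project's actual loggers):

```js
// old style: everything interpolated into one string
// onLog(`deleting unused VHD ${vhd}`)

// new style: constant message, structured data
const logInfo = (message, data) => console.log('[info]', message, data)
const logWarn = (message, data) => console.warn('[warn]', message, data)

logInfo('deleting unused VHD', { vhd: 'vm-dir/disk.vhd' })
logWarn('parent VHD is missing', { parent: 'parent.vhd', vhdPath: 'child.vhd' })
```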
@@ -115,19 +125,14 @@ const listVhds = async (handler, vmDir) => {
  return { vhds, interruptedVhds, aliases }
}

async function checkAliases(
  aliasPaths,
  targetDataRepository,
  { handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
  const aliasFound = []
  for (const path of aliasPaths) {
    const target = await resolveVhdAlias(handler, path)

    if (!isVhdFile(target)) {
      logWarn('alias references non VHD target', { path, target })
      onLog(`Alias ${path} references a non vhd target: ${target}`)
      if (remove) {
        logInfo('removing alias and non VHD target', { path, target })
        await handler.unlink(target)
        await handler.unlink(path)
      }

@@ -142,13 +147,13 @@ async function checkAliases(
        // error during dispose should not trigger a deletion
      }
    } catch (error) {
      logWarn('missing or broken alias target', { target, path, error })
      onLog(`target ${target} of alias ${path} is missing or broken`, { error })
      if (remove) {
        try {
          await VhdAbstract.unlink(handler, path)
        } catch (error) {
          if (error.code !== 'ENOENT') {
            logWarn('error deleting alias target', { target, path, error })
        } catch (e) {
          if (e.code !== 'ENOENT') {
            onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
          }
        }
      }
@@ -165,22 +170,20 @@ async function checkAliases(

  entries.forEach(async entry => {
    if (!aliasFound.includes(entry)) {
      logWarn('no alias references VHD', { entry })
      onLog(`the Vhd ${entry} is not referenced by an alias`)
      if (remove) {
        logInfo('deleting unaliased VHD')
        await VhdAbstract.unlink(handler, entry)
      }
    }
  })
}

exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

exports.cleanVm = async function cleanVm(
  vmDir,
  { fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
  { fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
) {
  const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)

@@ -189,7 +192,6 @@ exports.cleanVm = async function cleanVm(
  const vhdsToJSons = new Set()
  const vhdParents = { __proto__: null }
  const vhdChildren = { __proto__: null }
  const vhdById = new Map()

  const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
@@ -209,31 +211,16 @@ exports.cleanVm = async function cleanVm(
        }
        vhdChildren[parent] = path
      }
      const duplicate = vhdById.get(vhd.footer.uuid)
      if (duplicate !== undefined) {
        logWarn('uuid is duplicated', { uuid: vhd.footer.uuid })
        if (duplicate.containsAllDataOf(vhd)) {
          logWarn(`should delete ${path}`)
        } else if (vhd.containsAllDataOf(duplicate)) {
          logWarn(`should delete ${duplicate._path}`)
        } else {
          logWarn(`same ids but different content`)
        }
      }
      vhdById.set(vhd.footer.uuid, vhd)
    })
  } catch (error) {
    vhds.delete(path)
    logWarn('VHD check error', { path, error })
    onLog(`error while checking the VHD with path ${path}`, { error })
    if (error?.code === 'ERR_ASSERTION' && remove) {
      logInfo('deleting broken path', { path })
      onLog(`deleting broken ${path}`)
      return VhdAbstract.unlink(handler, path)
    }
  }
})
// the vhd are closed at the end of the disposable
// it's unsafe to use them later
vhdById.clear()

// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
@@ -241,12 +228,12 @@ exports.cleanVm = async function cleanVm(
  const statePath = interruptedVhds.get(interruptedVhd)
  interruptedVhds.delete(interruptedVhd)

  logWarn('orphan merge state', {
  onLog('orphan merge state', {
    mergeStatePath: statePath,
    missingVhdPath: interruptedVhd,
  })
  if (remove) {
    logInfo('deleting orphan merge state', { statePath })
    onLog(`deleting orphan merge state ${statePath}`)
    await handler.unlink(statePath)
  }
}
@@ -255,7 +242,7 @@ exports.cleanVm = async function cleanVm(
  // check if alias are correct
  // check if all vhd in data subfolder have a corresponding alias
  await asyncMap(Object.keys(aliases), async dir => {
    await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
    await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
  })

  // remove VHDs with missing ancestors
@@ -277,9 +264,9 @@ exports.cleanVm = async function cleanVm(
      if (!vhds.has(parent)) {
        vhds.delete(vhdPath)

        logWarn('parent VHD is missing', { parent, vhdPath })
        onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
        if (remove) {
          logInfo('deleting orphan VHD', { vhdPath })
          onLog(`deleting orphan VHD ${vhdPath}`)
          deletions.push(VhdAbstract.unlink(handler, vhdPath))
        }
      }
@@ -316,7 +303,7 @@ exports.cleanVm = async function cleanVm(
    // check is not good enough to delete the file, the best we can do is report
    // it
    if (!(await this.isValidXva(path))) {
      logWarn('XVA might be broken', { path })
      onLog(`the XVA with path ${path} is potentially broken`)
    }
  })
@@ -330,7 +317,7 @@ exports.cleanVm = async function cleanVm(
    try {
      metadata = JSON.parse(await handler.readFile(json))
    } catch (error) {
      logWarn('failed to read metadata file', { json, error })
      onLog(`failed to read metadata file ${json}`, { error })
      jsons.delete(json)
      return
    }
@@ -341,9 +328,9 @@ exports.cleanVm = async function cleanVm(
    if (xvas.has(linkedXva)) {
      unusedXvas.delete(linkedXva)
    } else {
      logWarn('metadata XVA is missing', { json })
      onLog(`the XVA linked to the metadata ${json} is missing`)
      if (remove) {
        logInfo('deleting incomplete backup', { json })
        onLog(`deleting incomplete backup ${json}`)
        jsons.delete(json)
        await handler.unlink(json)
      }
@@ -364,9 +351,9 @@ exports.cleanVm = async function cleanVm(
        vhdsToJSons[path] = json
      })
    } else {
      logWarn('some metadata VHDs are missing', { json, missingVhds })
      onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
      if (remove) {
        logInfo('deleting incomplete backup', { json })
        onLog(`deleting incomplete backup ${json}`)
        jsons.delete(json)
        await handler.unlink(json)
      }
@@ -407,9 +394,9 @@ exports.cleanVm = async function cleanVm(
      }
    }

    logWarn('unused VHD', { vhd })
    onLog(`the VHD ${vhd} is unused`)
    if (remove) {
      logInfo('deleting unused VHD', { vhd })
      onLog(`deleting unused VHD ${vhd}`)
      unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
    }
  }
@@ -433,7 +420,7 @@ exports.cleanVm = async function cleanVm(
  const metadataWithMergedVhd = {}
  const doMerge = async () => {
    await asyncMap(toMerge, async chain => {
      const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
      const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
      if (merged !== undefined) {
        const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metadata file
        metadataWithMergedVhd[metadataPath] = true
@@ -445,18 +432,18 @@ exports.cleanVm = async function cleanVm(
    ...unusedVhdsDeletion,
    toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
    asyncMap(unusedXvas, path => {
      logWarn('unused XVA', { path })
      onLog(`the XVA ${path} is unused`)
      if (remove) {
        logInfo('deleting unused XVA', { path })
        onLog(`deleting unused XVA ${path}`)
        return handler.unlink(path)
      }
    }),
    asyncMap(xvaSums, path => {
      // no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
      if (!xvas.has(path.slice(0, -'.checksum'.length))) {
        logInfo('unused XVA checksum', { path })
        onLog(`the XVA checksum ${path} is unused`)
        if (remove) {
          logInfo('deleting unused XVA checksum', { path })
          onLog(`deleting unused XVA checksum ${path}`)
          return handler.unlink(path)
        }
      }
@@ -490,11 +477,11 @@ exports.cleanVm = async function cleanVm(

      // don't warn if the size has changed after a merge
      if (!merged && fileSystemSize !== size) {
        logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
        onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
      }
    }
  } catch (error) {
    logWarn('failed to get metadata size', { metadataPath, error })
    onLog(`failed to get size of ${metadataPath}`, { error })
    return
  }

@@ -504,7 +491,7 @@ exports.cleanVm = async function cleanVm(
  try {
    await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
  } catch (error) {
    logWarn('metadata size update failed', { metadataPath, error })
    onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
  }
}
})
@@ -69,8 +69,6 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
├─ task.warning(message: string)
├─ task.start(data: { type: 'VM', id: string })
│  ├─ task.warning(message: string)
│  ├─ task.start(message: 'clean-vm')
│  │  └─ task.end
│  ├─ task.start(message: 'snapshot')
│  │  └─ task.end
│  ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
@@ -91,8 +89,12 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
│  │  ├─ task.start(message: 'clean')
│  │  │  ├─ task.warning(message: string)
│  │  │  └─ task.end
│  │  └─ task.end
│  ├─ task.start(message: 'clean-vm')
│  │  │
│  │  │  // in case of delta backup
│  │  ├─ task.start(message: 'merge')
│  │  │  ├─ task.warning(message: string)
│  │  │  └─ task.end(result: { size: number })
│  │  │
│  │  └─ task.end
│  └─ task.end
└─ job.end
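A hedged sketch of consuming the event stream described by the tree above; the flat `{ event, message, data, result }` record shape is an assumption for illustration, only the names and payloads come from the tree:

```js
function onBackupEvent(record) {
  switch (record.event) {
    case 'job.start': // data: { mode, reportWhen }
    case 'task.start': // message: 'clean-vm' | 'snapshot' | 'export' | 'merge' | 'clean'
      console.log('start', record.message ?? '', record.data ?? {})
      break
    case 'task.warning':
      console.warn('warning', record.message)
      break
    case 'task.end': // 'merge' may carry result: { size }
    case 'job.end':
      console.log('end', record.result ?? {})
      break
  }
}
```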
@@ -8,7 +8,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.26.0",
  "version": "0.23.0",
  "engines": {
    "node": ">=14.6"
  },
@@ -22,7 +22,7 @@
    "@vates/disposable": "^0.1.1",
    "@vates/parse-duration": "^0.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/fs": "^1.1.0",
    "@xen-orchestra/fs": "^1.0.1",
    "@xen-orchestra/log": "^0.3.0",
    "@xen-orchestra/template": "^0.1.0",
    "compare-versions": "^4.0.1",
@@ -38,7 +38,7 @@
    "promise-toolbox": "^0.21.0",
    "proper-lockfile": "^4.1.2",
    "uuid": "^8.3.2",
    "vhd-lib": "^3.3.1",
    "vhd-lib": "^3.1.0",
    "yazl": "^2.5.1"
  },
  "devDependencies": {
@@ -46,7 +46,7 @@
    "tmp": "^0.2.1"
  },
  "peerDependencies": {
    "@xen-orchestra/xapi": "^1.4.0"
    "@xen-orchestra/xapi": "^1.0.0"
  },
  "license": "AGPL-3.0-or-later",
  "author": {
@@ -6,9 +6,8 @@ const { join } = require('path')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')

const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
const { warn } = createLogger('xo:backups:MixinBackupWriter')

exports.MixinBackupWriter = (BaseClass = Object) =>
  class MixinBackupWriter extends BaseClass {
@@ -26,17 +25,11 @@ exports.MixinBackupWriter = (BaseClass = Object) =>

    async _cleanVm(options) {
      try {
        return await Task.run({ name: 'clean-vm' }, () => {
          return this._adapter.cleanVm(this.#vmBackupDir, {
            ...options,
            fixMetadata: true,
            logInfo: info,
            logWarn: (message, data) => {
              warn(message, data)
              Task.warning(message, data)
            },
            lock: false,
          })
        return await this._adapter.cleanVm(this.#vmBackupDir, {
          ...options,
          fixMetadata: true,
          onLog: warn,
          lock: false,
        })
      } catch (error) {
        warn(error)
@@ -18,7 +18,7 @@
  "preferGlobal": true,
  "dependencies": {
    "golike-defer": "^0.5.1",
    "xen-api": "^1.2.1"
    "xen-api": "^1.2.0"
  },
  "scripts": {
    "postversion": "npm publish"
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/emit-async",
  "version": "1.0.0",
  "version": "0.1.0",
  "license": "ISC",
  "description": "Emit an event for async listeners to settle",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/fs",
  "version": "1.1.0",
  "version": "1.0.1",
  "license": "AGPL-3.0-or-later",
  "description": "The File System for Xen Orchestra backups.",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -42,7 +42,7 @@
    "proper-lockfile": "^4.1.2",
    "readable-stream": "^3.0.6",
    "through2": "^4.0.2",
    "xo-remote-parser": "^0.9.1"
    "xo-remote-parser": "^0.8.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
@@ -1,7 +1,6 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
@@ -12,8 +11,6 @@ import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './_path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'

const { warn } = createLogger('@xen-orchestra:fs')

const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
  const seconds = hrtime[0] + hrtime[1] / 1e9
@@ -360,12 +357,11 @@ export default class RemoteHandlerAbstract {
        readRate: computeRate(readDuration, SIZE),
      }
    } catch (error) {
      warn(`error while testing the remote at step ${step}`, { error })
      return {
        success: false,
        step,
        file: testFileName,
        error,
        error: error.message || String(error),
      }
    } finally {
      ignoreErrors.call(this._unlink(testFileName))
@@ -424,10 +420,6 @@ export default class RemoteHandlerAbstract {

  // Methods that can be implemented by inheriting classes

  useVhdDirectory() {
    return this._remote.useVhdDirectory ?? false
  }

  async _closeFile(fd) {
    throw new Error('Not implemented')
  }
@@ -525,8 +525,4 @@ export default class S3Handler extends RemoteHandlerAbstract {
  }

  async _closeFile(fd) {}

  useVhdDirectory() {
    return true
  }
}
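The two hunks above (abstract handler and S3 handler) replace the hard-coded `handler.type === 's3'` test seen earlier in `RemoteAdapter` with polymorphic dispatch; a condensed sketch of the resulting hierarchy:

```js
// Sketch of the dispatch introduced by this diff.
class RemoteHandlerAbstract {
  useVhdDirectory() {
    // default: honour the remote's configuration flag
    return this._remote.useVhdDirectory ?? false
  }
}

class S3Handler extends RemoteHandlerAbstract {
  useVhdDirectory() {
    return true // S3 always stores VHDs as directories of blocks
  }
}
```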
@@ -14,14 +14,14 @@
    "url": "https://vates.fr"
  },
  "license": "AGPL-3.0-or-later",
  "version": "0.5.0",
  "version": "0.4.0",
  "engines": {
    "node": ">=12"
  },
  "dependencies": {
    "@vates/event-listeners-manager": "^1.0.1",
    "@vates/event-listeners-manager": "^1.0.0",
    "@vates/parse-duration": "^0.1.1",
    "@xen-orchestra/emit-async": "^1.0.0",
    "@xen-orchestra/emit-async": "^0.1.0",
    "@xen-orchestra/log": "^0.3.0",
    "app-conf": "^2.1.0",
    "lodash": "^4.17.21",
@@ -9,7 +9,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.2",
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
@@ -30,7 +30,7 @@
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@vates/read-chunk": "^1.0.0"
    "@vates/read-chunk": "^0.1.2"
  },
  "author": {
    "name": "Vates SAS",
@@ -33,19 +33,26 @@ async function main(argv) {
    ignoreUnknownFormats: true,
  })

  const opts = getopts(argv, {
  const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}

  const {
    _: args,
    file,
    help,
    host,
    raw,
    token,
  } = getopts(argv, {
    alias: { file: 'f', help: 'h' },
    boolean: ['help', 'raw'],
    default: {
      token: config.authenticationToken,
    },
    stopEarly: true,
    string: ['file', 'host', 'token', 'url'],
    string: ['file', 'host', 'token'],
  })

  const { _: args, file } = opts

  if (opts.help || (file === '' && args.length === 0)) {
  if (help || (file === '' && args.length === 0)) {
    return console.log(
      '%s',
      `Usage:
@@ -70,29 +77,18 @@ ${pkg.name} v${pkg.version}`
  const baseRequest = {
    headers: {
      'content-type': 'application/json',
      cookie: `authenticationToken=${token}`,
    },
    pathname: '/api/v1',
    protocol: 'https:',
    rejectUnauthorized: false,
  }
  let { token } = opts
  if (opts.url !== '') {
    const { protocol, host, username } = new URL(opts.url)
    Object.assign(baseRequest, { protocol, host })
    if (username !== '') {
      token = username
    }
  if (host !== '') {
    baseRequest.host = host
  } else {
    baseRequest.protocol = 'https:'
    if (opts.host !== '') {
      baseRequest.host = opts.host
    } else {
      const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
      baseRequest.hostname = hostname
      baseRequest.port = port
    }
    baseRequest.hostname = hostname
    baseRequest.port = port
  }
  baseRequest.headers.cookie = `authenticationToken=${token}`

  const call = async ({ method, params }) => {
    if (callPath.length !== 0) {
      process.stderr.write(`\n${colors.bold(`--- call #${callPath.join('.')}`)} ---\n\n`)
@@ -131,7 +127,7 @@ ${pkg.name} v${pkg.version}`
        stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
        stdout.write('\n')
      }
    } else if (opts.raw && typeof result === 'string') {
    } else if (raw && typeof result === 'string') {
      stdout.write(result)
    } else {
      stdout.write(inspect(result, { colors: true, depth: null }))
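The rewrite above adds a `--url` option from which the endpoint and, optionally, the token are derived; a sketch of the parsing rule taken from the hunk (the example URL is hypothetical):

```js
// e.g. --url https://mytoken@proxy.example.net
const { protocol, host, username } = new URL('https://mytoken@proxy.example.net')
// protocol → 'https:', host → 'proxy.example.net'
// a non-empty username in the URL overrides the configured token
const token = username !== '' ? username : 'token-from-config'
```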
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/proxy-cli",
  "version": "0.3.1",
  "version": "0.2.0",
  "license": "AGPL-3.0-or-later",
  "description": "CLI for @xen-orchestra/proxy",
  "keywords": [
@@ -26,7 +26,7 @@
  },
  "dependencies": {
    "@iarna/toml": "^2.2.0",
    "@vates/read-chunk": "^1.0.0",
    "@vates/read-chunk": "^0.1.2",
    "ansi-colors": "^4.1.1",
    "app-conf": "^2.1.0",
    "content-type": "^1.0.4",
@@ -1,7 +1,7 @@
{
  "private": true,
  "name": "@xen-orchestra/proxy",
  "version": "0.23.4",
  "version": "0.22.1",
  "license": "AGPL-3.0-or-later",
  "description": "XO Proxy used to remotely execute backup jobs",
  "keywords": [
@@ -32,13 +32,13 @@
    "@vates/decorate-with": "^2.0.0",
    "@vates/disposable": "^0.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.26.0",
    "@xen-orchestra/fs": "^1.1.0",
    "@xen-orchestra/backups": "^0.23.0",
    "@xen-orchestra/fs": "^1.0.1",
    "@xen-orchestra/log": "^0.3.0",
    "@xen-orchestra/mixin": "^0.1.0",
    "@xen-orchestra/mixins": "^0.5.0",
    "@xen-orchestra/self-signed": "^0.1.3",
    "@xen-orchestra/xapi": "^1.4.0",
    "@xen-orchestra/mixins": "^0.4.0",
    "@xen-orchestra/self-signed": "^0.1.0",
    "@xen-orchestra/xapi": "^1.0.0",
    "ajv": "^8.0.3",
    "app-conf": "^2.1.0",
    "async-iterator-to-stream": "^1.1.0",
@@ -46,7 +46,7 @@
    "get-stream": "^6.0.0",
    "getopts": "^2.2.3",
    "golike-defer": "^0.5.1",
    "http-server-plus": "^0.11.1",
    "http-server-plus": "^0.11.0",
    "http2-proxy": "^5.0.53",
    "json-rpc-protocol": "^0.13.1",
    "jsonrpc-websocket-client": "^0.7.2",
@@ -60,7 +60,7 @@
    "source-map-support": "^0.5.16",
    "stoppable": "^1.0.6",
    "xdg-basedir": "^5.1.0",
    "xen-api": "^1.2.1",
    "xen-api": "^1.2.0",
    "xo-common": "^0.8.0"
  },
  "devDependencies": {
@@ -8,7 +8,21 @@ exports.genSelfSignedCert = async ({ days = 360 } = {}) =>
  new Promise((resolve, reject) => {
    execFile(
      'openssl',
      ['req', '-batch', '-new', '-x509', '-days', String(days), '-nodes', '-newkey', 'rsa:2048', '-keyout', '-'],
      [
        'req',
        '-batch',
        '-new',
        '-x509',
        '-days',
        String(days),
        '-nodes',
        '-newkey',
        'ec',
        '-pkeyopt',
        'ec_paramgen_curve:secp384r1',
        '-keyout',
        '-',
      ],
      (error, stdout) => {
        if (error != null) {
          return reject(error)
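The hunk above swaps the generated key from RSA-2048 to an EC key on the secp384r1 curve, keeping the rest of the `openssl req` invocation intact. A hedged usage sketch of the exported helper (the shape of the resolved value is not shown in this diff, so it is left unspecified):

```js
const { genSelfSignedCert } = require('@xen-orchestra/self-signed')

// generates a certificate valid for 360 days by default
const cert = await genSelfSignedCert({ days: 360 })
```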
@@ -9,7 +9,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.3",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
@xen-orchestra/template/.babelrc.js (new file, 3 lines)
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@xen-orchestra/template/.eslintrc.js (symbolic link, 1 line)
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -14,13 +14,31 @@
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "main": "dist/",
  "browserslist": [
    ">2%"
  ],
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "lodash": "^4.17.15"
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish --access public"
  },
  "dependencies": {
    "lodash": "^4.17.15"
  }
}
@@ -1,10 +1,8 @@
'use strict'

const escapeRegExp = require('lodash/escapeRegExp')
import escapeRegExp from 'lodash/escapeRegExp'

const compareLengthDesc = (a, b) => b.length - a.length

exports.compileTemplate = function compileTemplate(pattern, rules) {
export function compileTemplate(pattern, rules) {
  const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
  const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
  return (...params) =>

@@ -1,8 +1,5 @@
/* eslint-env jest */

'use strict'

const { compileTemplate } = require('.')
import { compileTemplate } from '.'

it("correctly replaces the template's variables", () => {
  const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {
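A hedged usage sketch of `compileTemplate`, consistent with the pattern in the spec above: rule values may be constants or functions of the replacer's arguments, and `\\` escapes a placeholder (the concrete rules below are assumptions):

```js
const { compileTemplate } = require('@xen-orchestra/template')

const replacer = compileTemplate('{name}_{date}', {
  '{name}': vm => vm.name_label,
  '{date}': () => new Date().toISOString(),
})

console.log(replacer({ name_label: 'my-vm' })) // e.g. 'my-vm_2022-06-30T...'
```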
@@ -1,6 +1,6 @@
{
  "name": "@xen-orchestra/upload-ova",
  "version": "0.1.5",
  "version": "0.1.4",
  "license": "AGPL-3.0-or-later",
  "description": "Basic CLI to upload ova files to Xen-Orchestra",
  "keywords": [
@@ -43,7 +43,7 @@
    "pw": "^0.0.4",
    "xdg-basedir": "^4.0.0",
    "xo-lib": "^0.11.1",
    "xo-vmdk-to-vhd": "^2.4.2"
    "xo-vmdk-to-vhd": "^2.3.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
@@ -1,9 +0,0 @@
'use strict'

// TODO: remove when Node >=15.0
module.exports = class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}
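For reference, this small polyfill mirrors Node's built-in `AggregateError` (available since Node 15), which accepts the same `(errors, message)` arguments:

```js
// native equivalent on Node >= 15, no require needed
throw new AggregateError([new Error('a'), new Error('b')], 'optional message')
```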
@@ -230,9 +230,8 @@ function mixin(mixins) {
  defineProperties(xapiProto, descriptors)
}
mixin({
  host: require('./host.js'),
  SR: require('./sr.js'),
  task: require('./task.js'),
  host: require('./host.js'),
  VBD: require('./vbd.js'),
  VDI: require('./vdi.js'),
  VIF: require('./vif.js'),
@@ -1,6 +1,6 @@
{
  "name": "@xen-orchestra/xapi",
  "version": "1.4.0",
  "version": "1.0.0",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
@@ -15,7 +15,7 @@
    "node": ">=14"
  },
  "peerDependencies": {
    "xen-api": "^1.2.1"
    "xen-api": "^1.2.0"
  },
  "scripts": {
    "postversion": "npm publish --access public"
@@ -26,10 +26,8 @@
    "@xen-orchestra/log": "^0.3.0",
    "d3-time-format": "^3.0.0",
    "golike-defer": "^0.5.1",
    "json-rpc-protocol": "^0.13.2",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.21.0",
    "vhd-lib": "^3.3.1",
    "xo-common": "^0.8.0"
  },
  "private": false,
@@ -1,164 +0,0 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState } = require('xo-common/api-errors')
const { VDI_FORMAT_RAW } = require('./index.js')
const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')

const AggregateError = require('./_AggregateError.js')

const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:sr')

const OC_MAINTENANCE = 'xo:maintenanceState'

class Sr {
  async create({
    content_type = 'user', // recommended by Citrix
    device_config,
    host,
    name_description = '',
    name_label,
    physical_size = 0,
    shared,
    sm_config = {},
    type,
  }) {
    const ref = await this.call(
      'SR.create',
      host,
      device_config,
      physical_size,
      name_label,
      name_description,
      type,
      content_type,
      shared,
      sm_config
    )

    // https://developer-docs.citrix.com/projects/citrix-hypervisor-sdk/en/latest/xc-api-extensions/#sr
    this.setFieldEntry('SR', ref, 'other_config', 'auto-scan', 'true').catch(warn)

    return ref
  }

  // Switch the SR to maintenance mode:
  // - shutdown all running VMs with a VDI on this SR
  //   - their UUID is saved into SR.other_config[OC_MAINTENANCE].shutdownVms
  //   - clean shutdown is attempted, and falls back to a hard shutdown
  // - unplug all connected hosts from this SR
  async enableMaintenanceMode($defer, ref, { vmsToShutdown = [] } = {}) {
    const state = { timestamp: Date.now() }

    // will throw if already in maintenance mode
    await this.call('SR.add_to_other_config', ref, OC_MAINTENANCE, JSON.stringify(state))

    await $defer.onFailure.call(this, 'call', 'SR.remove_from_other_config', ref, OC_MAINTENANCE)

    const runningVms = new Map()
    const handleVbd = async ref => {
      const vmRef = await this.getField('VBD', ref, 'VM')
      if (!runningVms.has(vmRef)) {
        const power_state = await this.getField('VM', vmRef, 'power_state')
        const isPaused = power_state === 'Paused'
        if (isPaused || power_state === 'Running') {
          runningVms.set(vmRef, isPaused)
        }
      }
    }
    await asyncMap(await this.getField('SR', ref, 'VDIs'), async ref => {
      await asyncMap(await this.getField('VDI', ref, 'VBDs'), handleVbd)
    })

    {
      const runningVmUuids = await asyncMap(runningVms.keys(), ref => this.getField('VM', ref, 'uuid'))

      const set = new Set(vmsToShutdown)
      for (const vmUuid of runningVmUuids) {
        if (!set.has(vmUuid)) {
          throw incorrectState({
            actual: vmsToShutdown,
            expected: runningVmUuids,
            property: 'vmsToShutdown',
          })
        }
      }
    }

    state.shutdownVms = {}

    await asyncMapSettled(runningVms, async ([ref, isPaused]) => {
      state.shutdownVms[await this.getField('VM', ref, 'uuid')] = isPaused

      try {
        await this.callAsync('VM.clean_shutdown', ref)
      } catch (error) {
        warn('SR_enableMaintenanceMode, VM clean shutdown', { error })
        await this.callAsync('VM.hard_shutdown', ref)
      }

      $defer.onFailure.call(this, 'callAsync', 'VM.start', ref, isPaused, true)
    })

    state.unpluggedPbds = []
    await asyncMapSettled(await this.getField('SR', ref, 'PBDs'), async ref => {
      if (await this.getField('PBD', ref, 'currently_attached')) {
        state.unpluggedPbds.push(await this.getField('PBD', ref, 'uuid'))

        await this.callAsync('PBD.unplug', ref)

        $defer.onFailure.call(this, 'callAsync', 'PBD.plug', ref)
      }
    })

    await this.setFieldEntry('SR', ref, 'other_config', OC_MAINTENANCE, JSON.stringify(state))
  }

  // this method is best effort and will not stop on first error
  async disableMaintenanceMode(ref) {
    const state = JSON.parse((await this.getField('SR', ref, 'other_config'))[OC_MAINTENANCE])

    // will throw if not in maintenance mode
    await this.call('SR.remove_from_other_config', ref, OC_MAINTENANCE)

    const errors = []

    await asyncMap(state.unpluggedPbds, async uuid => {
      try {
        await this.callAsync('PBD.plug', await this.call('PBD.get_by_uuid', uuid))
      } catch (error) {
        errors.push(error)
      }
    })

    await asyncMap(Object.entries(state.shutdownVms), async ([uuid, isPaused]) => {
      try {
        await this.callAsync('VM.start', await this.call('VM.get_by_uuid', uuid), isPaused, true)
      } catch (error) {
        errors.push(error)
      }
    })

    if (errors.length !== 0) {
      throw new AggregateError(errors)
    }
  }

  async importVdi(
    $defer,
    ref,
    stream,
    { name_label = '[XO] Imported disk - ' + new Date().toISOString(), ...vdiCreateOpts } = {}
  ) {
    const footer = await peekFooterFromStream(stream)
    const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size: footer.currentSize })
    $defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
    await this.VDI_importContent(vdiRef, stream, { format: VDI_FORMAT_RAW })
    return vdiRef
  }
}
module.exports = Sr

decorateClass(Sr, { enableMaintenanceMode: defer, importVdi: defer })
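A hedged sketch of driving the maintenance-mode methods above from an XAPI client; the `SR_`-prefixed names follow the mixin convention visible in `index.js` and in `this.VDI_create` above, and `$defer` is injected by the `defer` decorator so callers do not pass it:

```js
// `xapi` is assumed to be a connected client with the Sr mixin applied
await xapi.SR_enableMaintenanceMode(srRef, {
  // UUIDs of running VMs we explicitly allow to be shut down;
  // any other running VM on the SR makes the call throw incorrectState
  vmsToShutdown: ['vm-uuid-1'],
})

// ... perform storage maintenance ...

// best effort: re-plugs PBDs, restarts recorded VMs, clears the flag
await xapi.SR_disableMaintenanceMode(srRef)
```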
@@ -6,8 +6,6 @@ const { Ref } = require('xen-api')

const isVmRunning = require('./_isVmRunning.js')

const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:vbd')

const noop = Function.prototype

module.exports = class Vbd {
@@ -68,10 +66,8 @@ module.exports = class Vbd {
    })

    if (isVmRunning(powerState)) {
      this.callAsync('VBD.plug', vbdRef).catch(warn)
      await this.callAsync('VBD.plug', vbdRef)
    }

    return vbdRef
  }

  async unplug(ref) {
@@ -30,7 +30,8 @@ class Vdi {
      other_config = {},
      read_only = false,
      sharable = false,
      SR = this.pool.default_SR,
      sm_config,
      SR,
      tags,
      type = 'user',
      virtual_size,
@@ -38,10 +39,10 @@ class Vdi {
    },
    {
      // blindly copying `sm_config` from another VDI can create problems,
      // therefore it should be passed explicitly
      // therefore it is ignored by default by this method
      //
      // see https://github.com/vatesfr/xen-orchestra/issues/4482
      sm_config,
      setSmConfig = false,
    } = {}
  ) {
    return this.call('VDI.create', {
@@ -50,7 +51,7 @@ class Vdi {
      other_config,
      read_only,
      sharable,
      sm_config,
      sm_config: setSmConfig ? sm_config : undefined,
      SR,
      tags,
      type,
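With the change above, `sm_config` moves from the main field bag into a second, opt-in options bag, so it is no longer copied by accident; a hedged call sketch (field values are illustrative):

```js
// sm_config is only forwarded because setSmConfig is true
const vdiRef = await xapi.VDI_create(
  { name_label: 'my disk', SR: srRef, virtual_size: 10 * 1024 ** 3 },
  { setSmConfig: true, sm_config: { type: 'raw' } }
)
```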
@@ -11,8 +11,7 @@ const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
const { JsonRpcError } = require('json-rpc-protocol')
const { incorrectState } = require('xo-common/api-errors.js')
const { Ref } = require('xen-api')

const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -344,13 +343,7 @@ class Vm {
    const vm = await this.getRecord('VM', vmRef)

    if (!bypassBlockedOperation && 'destroy' in vm.blocked_operations) {
      throw forbiddenOperation(
        `destroy is blocked: ${
          vm.blocked_operations.destroy === 'true'
            ? 'protected from accidental deletion'
            : vm.blocked_operations.destroy
        }`
      )
      throw new Error('destroy is blocked')
    }

    if (!forceDeleteDefaultTemplate && isDefaultTemplate(vm)) {
@@ -510,22 +503,6 @@ class Vm {
      }
      return ref
    } catch (error) {
      if (
        // xxhash is the new form of consistency hashing in CH 8.1, which uses a faster,
        // more efficient hashing algorithm to generate the consistency checks
        // in order to support larger files without the consistency checking process taking an incredibly long time
        error.code === 'IMPORT_ERROR' &&
        error.params?.some(
          param =>
            param.includes('INTERNAL_ERROR') &&
            param.includes('Expected to find an inline checksum') &&
            param.includes('.xxhash')
        )
      ) {
        warn('import', { error })
        throw new JsonRpcError('Importing this VM requires XCP-ng or Citrix Hypervisor >=8.1')
      }

      // augment the error with as much relevant info as possible
      const [poolMaster, sr] = await Promise.all([
        safeGetRecord(this, 'host', this.pool.master),
@@ -548,15 +525,11 @@ class Vm {

    // requires the VM to be halted because it's not possible to re-plug VUSB on a live VM
    if (unplugVusbs && isHalted) {
      // vm.VUSBs can be undefined (e.g. on XS 7.0.0)
      const vusbs = vm.VUSBs
      if (vusbs !== undefined) {
        await asyncMap(vusbs, async ref => {
          const vusb = await this.getRecord('VUSB', ref)
          await vusb.$call('destroy')
          $defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
        })
      }
      await asyncMap(vm.VUSBs, async ref => {
        const vusb = await this.getRecord('VUSB', ref)
        await vusb.$call('destroy')
        $defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
      })
    }

    let destroyNobakVdis = false
129
CHANGELOG.md
129
CHANGELOG.md
@@ -1,130 +1,5 @@
|
||||
# ChangeLog
|
||||
|
||||
## **5.72.0** (2022-06-30)
|
||||
|
||||
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Backup] Merge delta backups without copying data when using VHD directories on NFS/SMB/local remote(https://github.com/vatesfr/xen-orchestra/pull/6271))
|
||||
- [Proxies] Ability to copy the proxy access URL (PR [#6287](https://github.com/vatesfr/xen-orchestra/pull/6287))
|
||||
- [SR/Advanced] Ability to enable/disable _Maintenance Mode_ [#6215](https://github.com/vatesfr/xen-orchestra/issues/6215) (PRs [#6308](https://github.com/vatesfr/xen-orchestra/pull/6308), [#6297](https://github.com/vatesfr/xen-orchestra/pull/6297))
|
||||
- [User] User tokens management through XO interface (PR [#6276](https://github.com/vatesfr/xen-orchestra/pull/6276))
|
||||
- [Tasks, VM/General] Self Service users: show tasks related to their pools, hosts, SRs, networks and VMs (PR [#6217](https://github.com/vatesfr/xen-orchestra/pull/6217))
|
||||
|
||||
### Enhancements
|
||||
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [Backup/Restore] Clearer error message when importing a VM backup requires XCP-n/CH >= 8.1 (PR [#6304](https://github.com/vatesfr/xen-orchestra/pull/6304))
|
||||
- [Backup] Users can use VHD directory on any remote type (PR [#6273](https://github.com/vatesfr/xen-orchestra/pull/6273))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [VDI Import] Fix `this._getOrWaitObject is not a function`
|
||||
- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
|
||||
- [OVA Import] Fix import stuck after first disk
|
||||
- [File restore] Ignore symbolic links
|
||||
|
||||
### Released packages
|
||||
|
||||
- @vates/event-listeners-manager 1.0.1
|
||||
- @vates/read-chunk 1.0.0
|
||||
- @xen-orchestra/backups 0.26.0
|
||||
- @xen-orchestra/backups-cli 0.7.4
|
||||
- xo-remote-parser 0.9.1
|
||||
- @xen-orchestra/fs 1.1.0
|
||||
- @xen-orchestra/openflow 0.1.2
|
||||
- @xen-orchestra/xapi 1.4.0
|
||||
- @xen-orchestra/proxy 0.23.4
|
||||
- @xen-orchestra/proxy-cli 0.3.1
|
||||
- vhd-lib 3.3.1
|
||||
- vhd-cli 0.8.0
|
||||
- xo-vmdk-to-vhd 2.4.2
|
||||
- xo-server 5.98.0
|
||||
- xo-web 5.99.0
|
||||
|
||||
## **5.71.1 (2022-06-13)**
|
||||
|
||||
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Show raw errors to administrators instead of _unknown error from the peer_ (PR [#6260](https://github.com/vatesfr/xen-orchestra/pull/6260))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [New SR] Fix `method.startsWith is not a function` when creating an _ext_ SR
|
||||
- Import VDI content now works when there is a HTTP proxy between XO and the host (PR [#6261](https://github.com/vatesfr/xen-orchestra/pull/6261))
|
||||
- [Backup] Fix `undefined is not iterable (cannot read property Symbol(Symbol.iterator))` on XS 7.0.0
|
||||
- [Backup] Ensure a warning is shown if a target preparation step fails (PR [#6266](https://github.com/vatesfr/xen-orchestra/pull/6266))
|
||||
- [OVA Export] Avoid creating a zombie task (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
|
||||
- [OVA Export] Increase speed by lowering compression to acceptable level (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
|
||||
- [OVA Export] Fix broken OVAs due to special characters in VM name (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/backups 0.25.0
|
||||
- @xen-orchestra/backups-cli 0.7.3
|
||||
- xen-api 1.2.1
|
||||
- @xen-orchestra/xapi 1.2.0
|
||||
- @xen-orchestra/proxy 0.23.2
|
||||
- @xen-orchestra/proxy-cli 0.3.0
|
||||
- xo-cli 0.14.0
|
||||
- xo-vmdk-to-vhd 2.4.1
|
||||
- xo-server 5.96.0
|
||||
- xo-web 5.97.2
|
||||
|
||||
## **5.71.0 (2022-05-31)**

### Highlights

- [Backup] _Restore Health Check_ can now be configured to be run automatically during a backup schedule (PRs [#6227](https://github.com/vatesfr/xen-orchestra/pull/6227), [#6228](https://github.com/vatesfr/xen-orchestra/pull/6228), [#6238](https://github.com/vatesfr/xen-orchestra/pull/6238) & [#6242](https://github.com/vatesfr/xen-orchestra/pull/6242))
- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be enabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [RPU/Host] If some backup jobs are running on the pool, ask for confirmation before starting an RPU, shutting down/rebooting a host or restarting a host's toolstack (PR [#6232](https://github.com/vatesfr/xen-orchestra/pull/6232))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
- [REST API] Support VDI creation via VHD import

### Enhancements

- [Backup] Merge multiple VHDs at once, which speeds up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup; this is useful to avoid keeping a halted VM locked during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [VM migration] Ensure the VM can be migrated before performing the migration to avoid issues [#5301](https://github.com/vatesfr/xen-orchestra/issues/5301) (PR [#6245](https://github.com/vatesfr/xen-orchestra/pull/6245))
- [Backup] Show any detected errors on existing backups instead of fixing them silently (PR [#6207](https://github.com/vatesfr/xen-orchestra/pull/6225))
- Created SRs will now have auto-scan enabled, similarly to what XenCenter does (PR [#6246](https://github.com/vatesfr/xen-orchestra/pull/6246))
- [RPU] Disable scheduled backup jobs during RPU (PR [#6244](https://github.com/vatesfr/xen-orchestra/pull/6244))

### Bug fixes

- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] The remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))
- [Home/Self] Don't make a VM's resource set name clickable for non-admin users, as they aren't allowed to view the Self Service page (PR [#6252](https://github.com/vatesfr/xen-orchestra/pull/6252))
- [load-balancer] Fix density mode failing to shut down hosts (PR [#6253](https://github.com/vatesfr/xen-orchestra/pull/6253))
- [Health] Make "Too many snapshots" table sortable by number of snapshots (PR [#6255](https://github.com/vatesfr/xen-orchestra/pull/6255))
- [Remote] Show complete errors instead of only a potentially missing message (PR [#6216](https://github.com/vatesfr/xen-orchestra/pull/6216))

### Released packages

- @xen-orchestra/self-signed 0.1.3
- vhd-lib 3.2.0
- @xen-orchestra/fs 1.0.3
- vhd-cli 0.7.2
- xo-vmdk-to-vhd 2.4.0
- @xen-orchestra/upload-ova 0.1.5
- @xen-orchestra/xapi 1.1.0
- @xen-orchestra/backups 0.24.0
- @xen-orchestra/backups-cli 0.7.2
- @xen-orchestra/emit-async 1.0.0
- @xen-orchestra/mixins 0.5.0
- @xen-orchestra/proxy 0.23.1
- xo-server 5.95.0
- xo-web 5.97.1
- xo-server-backup-reports 0.17.0
## 5.70.2 (2022-05-16)

### Bug fixes

@@ -160,6 +35,8 @@

## 5.70.0 (2022-04-29)

<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />

### Highlights

- [VM export] Feat export to `ova` format (PR [#6006](https://github.com/vatesfr/xen-orchestra/pull/6006))

@@ -196,6 +73,8 @@

## **5.69.2** (2022-04-13)

<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />

### Enhancements

- [Rolling Pool Update] New algorithm for XCP-ng updates (PR [#6188](https://github.com/vatesfr/xen-orchestra/pull/6188))
@@ -7,12 +7,20 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [SR] When SR is in maintenance, add "Maintenance mode" badge next to its name (PR [#6313](https://github.com/vatesfr/xen-orchestra/pull/6313))
- [Backup] Merge multiple VHDs at once, which speeds up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup; this is useful to avoid keeping a halted VM locked during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be enabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] The remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))

### Packages to release

> When modifying a package, add it here with its release type.

@@ -24,13 +32,22 @@

> - patch: if the change is a bug fix or a simple code improvement
> - minor: if the change is a new feature
> - major: if the change breaks compatibility
>
> Keep this list alphabetically ordered to avoid merge conflicts

<!--packages-start-->

- @xen-orchestra/self-signed patch
- vhd-lib patch
- @xen-orchestra/fs patch
- vhd-cli patch
- xo-vmdk-to-vhd minor
- @xen-orchestra/upload-ova patch
- @xen-orchestra/backups minor
- @xen-orchestra/backups-cli patch
- @xen-orchestra/emit-async major
- @xen-orchestra/mixins minor
- @xen-orchestra/proxy minor
- xo-server minor
- xo-web minor
- vhd-lib minor
- xo-server-backup-reports minor

<!--packages-end-->
@@ -99,38 +99,3 @@ To solve this issue, we recommend that you:

- wait until the other backup job is completed/the merge process is done
- make sure your remote storage is not being overworked

## Error: HTTP connection has timed out

This error occurs when XO tries to fetch data from a host, via the HTTP GET method. This error essentially means that the host (dom0 specifically) isn't responding anymore, after we asked it to expose the disk to be exported. This could be a symptom of having an overloaded dom0 that couldn't respond fast enough. It can also be caused by dom0 having trouble attaching the disk in question to expose it for fetching via HTTP, or just not having enough resources to answer our GET request.

::: warning
As a temporary workaround, you can increase the timeout beyond the default value to allow the host more time to respond. But you will eventually need to diagnose the root cause of the slow host response, or you risk the issue returning.
:::

Create the following file:

```
/etc/xo-server/config.httpInactivityTimeout.toml
```

Add the following lines:

```
# XOA Support - Work-around HTTP timeout issue during backups
[xapiOptions]
httpInactivityTimeout = 1800000 # 30 mins
```
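
Then restart `xo-server` so the new value is taken into account (assuming a standard XOA setup where it runs as a systemd service):

```
# systemctl restart xo-server.service
```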
## Error: Expected values to be strictly equal

This error occurs at the end of the transfer. XO checks the exported VM disk integrity, to ensure it's a valid VHD file (we check the VHD header as well as the footer of the received file). This error means the header and footer did not match, so the file is incomplete (likely the export from dom0 failed at some point and we only received a partial VHD/VM disk).
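
A minimal sketch of that kind of check in Node.js — not XO's actual implementation (which lives in `vhd-lib`), and assuming a dynamic VHD, whose 512-byte footer is duplicated at the start of the file:

```js
import { open } from 'node:fs/promises'

// A dynamic VHD starts with a copy of the 512-byte footer that also ends the file;
// if the two copies differ, the received file is truncated or corrupted.
async function quickVhdCheck(path) {
  const fd = await open(path, 'r')
  try {
    const { size } = await fd.stat()
    const head = Buffer.alloc(512)
    const tail = Buffer.alloc(512)
    await fd.read(head, 0, 512, 0) // footer copy at the start
    await fd.read(tail, 0, 512, size - 512) // real footer at the end
    return head.equals(tail)
  } finally {
    await fd.close()
  }
}
```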
## Error: the job is already running

This means the same job is still running, typically from the last scheduled run. This happens when you have a backup job scheduled too often. It can also occur if you have a long timeout configured for the job, and a slow VM export or slow transfer to your remote. In either case, you need to adjust your backup schedule to allow time for the job to finish or time out before the next scheduled run. We consider this an error to ensure you'll be notified that the planned schedule won't run this time because the previous one isn't finished.

## Error: VDI_IO_ERROR

This error comes directly from your host/dom0, and not XO. Essentially, XO asked the host to expose a VM disk to export via HTTP (as usual), XO managed to make the HTTP GET connection, and even start the transfer. But then at some point the host couldn't read the VM disk any further, causing this error on the host side. This might happen if the VDI is corrupted on the storage, or if there's a race condition during snapshots. More rarely, this can also occur if your SR is just too slow to keep up with the export as well as live VM traffic.

## Error: no XAPI associated to <UUID>

This message means that XO had a UUID of a VM to back up, but when the job ran it couldn't find any object matching it. This could be caused by the pool where this VM lived no longer being connected to XO. Double-check that the pool hosting the VM is currently connected under Settings > Servers. You can also search for the VM UUID in the Home > VMs search bar. If you can see it, run the backup job again and it will work. If you cannot, either the VM was removed or the pool is not connected.
@@ -66,13 +66,12 @@ You shouldn't have to change this. It's the path where `xo-web` files are served.

## Custom certificate authority

If you use certificates signed by an in-house CA for your XCP-ng or XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you can use the [`NODE_EXTRA_CA_CERTS`](https://nodejs.org/api/cli.html#cli_node_extra_ca_certs_file) environment variable.
If you use certificates signed by an in-house CA for your XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you need to add the `--use-openssl-ca` option in Node, but also add the CA to your trust store (`/etc/ssl/certs` via `update-ca-certificates` in your XOA).

To enable this option in your XOA, create `/etc/systemd/system/xo-server.service.d/ca.conf` with the following content:
To enable this option in your XOA, edit the `/etc/systemd/system/xo-server.service` file and add this:

```
[Service]
Environment=NODE_EXTRA_CA_CERTS=/usr/local/share/ca-certificates/my-cert.crt
Environment=NODE_OPTIONS=--use-openssl-ca
```

Don't forget to reload `systemd` conf and restart `xo-server`:

@@ -82,7 +81,9 @@ Don't forget to reload `systemd` conf and restart `xo-server`:

# systemctl restart xo-server.service
```

> For XO Proxy, the process is almost the same except the file to create is `/etc/systemd/system/xo-proxy.service.d/ca.conf` and the service to restart is `xo-proxy.service`.

:::tip
The `--use-openssl-ca` option is ignored by Node if Xen-Orchestra is run with Linux capabilities. Capabilities are commonly used to bind applications to privileged ports (<1024) (i.e. `CAP_NET_BIND_SERVICE`). Local NAT rules (`iptables`) or a reverse proxy would be required to use privileged ports and a custom certificate authority.
:::

## Redis server
@@ -18,8 +18,6 @@ If you lose your main pool, you can start the copy on the other side, with very

:::warning
It is normal that you can't boot the copied VM directly: we protect it. The normal workflow is to make a clone and then work on it.

This also affects VMs with "Auto Power On" enabled: thanks to this protection, these VMs won't start on your CR destination if you happen to reboot it.
:::

## Configure it

@@ -35,7 +35,3 @@ A higher retention number will lead to huge space occupation on your SR.

If you boot a copy of your production VM, be careful: if they share the same static IP, you'll have trouble.

A good way to avoid this kind of problem is to remove the network interface on the DR VM and check that the export completed correctly.

:::warning
For each DR replicated VM, we add "start" as a blocked operation, meaning even VMs with "Auto power on" enabled will not be started on your DR destination if it reboots.
:::
@@ -141,28 +141,6 @@ curl \

> myDisk.vhd
```

## VDI Import

A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
  | cat
```

> Note: the final `| cat` ensures cURL's standard output is not a TTY, which is necessary for upload stats to be displayed.

This request returns the UUID of the created VDI.

The following query parameters are supported to customize the created VDI:

- `name_label`
- `name_description`
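
The same call can be made from Node.js; a sketch assuming Node ≥ 18, whose built-in `fetch` accepts a streamed request body when `duplex: 'half'` is set:

```js
import { createReadStream } from 'node:fs'

const response = await fetch(
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI',
  {
    method: 'POST',
    headers: { cookie: 'authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs' },
    body: createReadStream('myDisk.vhd'), // async iterables are accepted as bodies
    duplex: 'half', // required when streaming a request body
  }
)
console.log(await response.text()) // UUID of the created VDI
```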
## The future

We are adding features and improving the REST API step by step. If you have interesting use cases or feedback, please ask directly at <https://xcp-ng.org/forum/category/12/xen-orchestra>
@@ -60,7 +60,6 @@
  "testEnvironment": "node",
  "testPathIgnorePatterns": [
    "/@vates/decorate-with/",
    "/@vates/event-listeners-manager/",
    "/@vates/predicates/",
    "/@xen-orchestra/audit-core/",
    "/dist/",
3 packages/complex-matcher/.babelrc.js Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
1 packages/complex-matcher/.eslintrc.js Symbolic link
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -1,14 +0,0 @@
'use strict'

const { parse } = require('./')
const { ast, pattern } = require('./index.fixtures')

module.exports = ({ benchmark }) => {
  benchmark('parse', () => {
    parse(pattern)
  })

  benchmark('toString', () => {
    ast.toString()
  })
}
@@ -16,6 +16,7 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "main": "dist/",
  "browserslist": [
    ">2%"
  ],
@@ -25,7 +26,21 @@
  "dependencies": {
    "lodash": "^4.17.4"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
12 packages/complex-matcher/src/index.bench.js Normal file
@@ -0,0 +1,12 @@
import { parse } from './'
import { ast, pattern } from './index.fixtures'

export default ({ benchmark }) => {
  benchmark('parse', () => {
    parse(pattern)
  })

  benchmark('toString', () => {
    ast.toString()
  })
}
@@ -1,10 +1,8 @@
'use strict'
import * as CM from './'

const CM = require('./')
export const pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'

exports.pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'

exports.ast = new CM.And([
export const ast = new CM.And([
  new CM.String('foo'),
  new CM.Not(new CM.String('\\ "')),
  new CM.Property('name', new CM.Or([new CM.String('wonderwoman'), new CM.String('batman')])),
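For context, the fixtures above encode a round trip between a pattern string and its node tree. Based on the API visible in this diff (hypothetical usage, not official documentation), matching and re-serializing look like:

```js
import * as CM from 'complex-matcher'

const ast = CM.parse('name:|(wonderwoman batman) hasCape?')
ast.match({ name: 'batman', hasCape: true }) // → true
ast.toString() // → 'name:|(wonderwoman batman) hasCape?'
```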
@@ -1,6 +1,4 @@
'use strict'

const { escapeRegExp, isPlainObject, some } = require('lodash')
import { escapeRegExp, isPlainObject, some } from 'lodash'

// ===================================================================

@@ -25,7 +23,7 @@ class Node {
  }
}

class Null extends Node {
export class Null extends Node {
  match() {
    return true
  }
@@ -34,11 +32,10 @@ class Null extends Node {
    return ''
  }
}
exports.Null = Null

const formatTerms = terms => terms.map(term => term.toString(true)).join(' ')

class And extends Node {
export class And extends Node {
  constructor(children) {
    super()
@@ -57,9 +54,8 @@ class And extends Node {
    return isNested ? `(${terms})` : terms
  }
}
exports.And = And

class Comparison extends Node {
export class Comparison extends Node {
  constructor(operator, value) {
    super()
    this._comparator = Comparison.comparators[operator]
@@ -75,7 +71,6 @@ class Comparison extends Node {
    return this._operator + String(this._value)
  }
}
exports.Comparison = Comparison
Comparison.comparators = {
  '>': (a, b) => a > b,
  '>=': (a, b) => a >= b,
@@ -83,7 +78,7 @@ Comparison.comparators = {
  '<=': (a, b) => a <= b,
}

class Or extends Node {
export class Or extends Node {
  constructor(children) {
    super()

@@ -101,9 +96,8 @@ class Or extends Node {
    return `|(${formatTerms(this.children)})`
  }
}
exports.Or = Or

class Not extends Node {
export class Not extends Node {
  constructor(child) {
    super()

@@ -118,9 +112,8 @@ class Not extends Node {
    return '!' + this.child.toString(true)
  }
}
exports.Not = Not

exports.Number = exports.NumberNode = class NumberNode extends Node {
export class NumberNode extends Node {
  constructor(value) {
    super()
@@ -140,8 +133,9 @@ exports.Number = exports.NumberNode = class NumberNode extends Node {
    return String(this.value)
  }
}
export { NumberNode as Number }

class NumberOrStringNode extends Node {
export class NumberOrStringNode extends Node {
  constructor(value) {
    super()

@@ -166,9 +160,9 @@ class NumberOrStringNode extends Node {
    return this.value
  }
}
exports.NumberOrString = exports.NumberOrStringNode = NumberOrStringNode
export { NumberOrStringNode as NumberOrString }

class Property extends Node {
export class Property extends Node {
  constructor(name, child) {
    super()

@@ -184,13 +178,12 @@ class Property extends Node {
    return `${formatString(this.name)}:${this.child.toString(true)}`
  }
}
exports.Property = Property

const escapeChar = char => '\\' + char
const formatString = value =>
  Number.isNaN(+value) ? (isRawString(value) ? value : `"${value.replace(/\\|"/g, escapeChar)}"`) : `"${value}"`

class GlobPattern extends Node {
export class GlobPattern extends Node {
  constructor(value) {
    // fallback to string node if no wildcard
    if (value.indexOf('*') === -1) {
@@ -223,9 +216,8 @@ class GlobPattern extends Node {
    return this.value
  }
}
exports.GlobPattern = GlobPattern

class RegExpNode extends Node {
export class RegExpNode extends Node {
  constructor(pattern, flags) {
    super()
@@ -253,9 +245,9 @@ class RegExpNode extends Node {
    return this.re.toString()
  }
}
exports.RegExp = RegExpNode
export { RegExpNode as RegExp }

class StringNode extends Node {
export class StringNode extends Node {
  constructor(value) {
    super()

@@ -283,9 +275,9 @@ class StringNode extends Node {
    return formatString(this.value)
  }
}
exports.String = exports.StringNode = StringNode
export { StringNode as String }

class TruthyProperty extends Node {
export class TruthyProperty extends Node {
  constructor(name) {
    super()

@@ -300,7 +292,6 @@ class TruthyProperty extends Node {
    return formatString(this.name) + '?'
  }
}
exports.TruthyProperty = TruthyProperty

// -------------------------------------------------------------------

@@ -540,7 +531,7 @@ const parser = P.grammar({
  ),
  ws: P.regex(/\s*/),
}).default
exports.parse = parser.parse.bind(parser)
export const parse = parser.parse.bind(parser)

// -------------------------------------------------------------------

@@ -582,7 +573,7 @@ const _getPropertyClauseStrings = ({ child }) => {
}

// Find possible values for property clauses in a and clause.
exports.getPropertyClausesStrings = function getPropertyClausesStrings(node) {
export const getPropertyClausesStrings = node => {
  if (!node) {
    return {}
  }
@@ -614,7 +605,7 @@ exports.getPropertyClausesStrings = function getPropertyClausesStrings(node) {

// -------------------------------------------------------------------

exports.setPropertyClause = function setPropertyClause(node, name, child) {
export const setPropertyClause = (node, name, child) => {
  const property = child && new Property(name, typeof child === 'string' ? new StringNode(child) : child)

  if (node === undefined) {
@@ -1,9 +1,7 @@
/* eslint-env jest */

'use strict'

const { ast, pattern } = require('./index.fixtures')
const {
import { ast, pattern } from './index.fixtures'
import {
  getPropertyClausesStrings,
  GlobPattern,
  Null,
@@ -13,7 +11,7 @@ const {
  Property,
  setPropertyClause,
  StringNode,
} = require('./')
} from './'

it('getPropertyClausesStrings', () => {
  const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/'))
3 packages/value-matcher/.babelrc.js Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
1 packages/value-matcher/.eslintrc.js Symbolic link
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -16,13 +16,27 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "main": "dist/",
  "browserslist": [
    ">2%"
  ],
  "engines": {
    "node": ">=6"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
@@ -1,5 +1,3 @@
'use strict'

const match = (pattern, value) => {
  if (Array.isArray(pattern)) {
    return (
@@ -45,6 +43,4 @@ const match = (pattern, value) => {
  return pattern === value
}

exports.createPredicate = function createPredicate(pattern) {
  return value => match(pattern, value)
}
export const createPredicate = pattern => value => match(pattern, value)
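Semantics are unchanged by this conversion; inferred from the `match` function above (arrays act as OR, plain objects require each listed property to match), a small usage sketch:

```js
import { createPredicate } from 'value-matcher'

const isInteresting = createPredicate({ power_state: ['Running', 'Paused'] })

const vms = [
  { name: 'vm1', power_state: 'Running' },
  { name: 'vm2', power_state: 'Halted' },
]
vms.filter(isInteresting) // → [{ name: 'vm1', power_state: 'Running' }]
```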
3 packages/vhd-cli/.babelrc.js Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
1 packages/vhd-cli/.eslintrc.js Symbolic link
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -1,43 +0,0 @@
//
// This file has been generated by [index-modules](https://npmjs.com/index-modules)
//

var d = Object.defineProperty
function de(o, n, v) {
  d(o, n, { enumerable: true, value: v })
  return v
}
function dl(o, n, g, a) {
  d(o, n, {
    configurable: true,
    enumerable: true,
    get: function () {
      return de(o, n, g(a))
    },
  })
}
function r(p) {
  var v = require(p)
  return v && v.__esModule
    ? v
    : typeof v === 'object' || typeof v === 'function'
    ? Object.create(v, { default: { enumerable: true, value: v } })
    : { default: v }
}
function e(p, i) {
  dl(defaults, i, function () {
    return exports[i].default
  })
  dl(exports, i, r, p)
}

d(exports, '__esModule', { value: true })
var defaults = de(exports, 'default', {})
e('./check.js', 'check')
e('./compare.js', 'compare')
e('./copy.js', 'copy')
e('./info.js', 'info')
e('./merge.js', 'merge')
e('./raw.js', 'raw')
e('./repl.js', 'repl')
e('./synthetize.js', 'synthetize')
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "vhd-cli",
  "version": "0.8.0",
  "version": "0.7.1",
  "license": "ISC",
  "description": "Tools to read/create and merge VHD files",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -16,24 +16,40 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {
    "vhd-cli": "./index.js"
    "vhd-cli": "dist/index.js"
  },
  "engines": {
    "node": ">=10"
    "node": ">=8.10"
  },
  "dependencies": {
    "@xen-orchestra/fs": "^1.1.0",
    "@xen-orchestra/fs": "^1.0.1",
    "cli-progress": "^3.1.0",
    "exec-promise": "^0.7.0",
    "getopts": "^2.2.3",
    "human-format": "^1.0.0",
    "lodash": "^4.17.21",
    "promise-toolbox": "^0.21.0",
    "uuid": "^8.3.2",
    "vhd-lib": "^3.3.1"
    "vhd-lib": "^3.1.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^7.0.2",
    "execa": "^5.0.0",
    "index-modules": "^0.4.3",
    "promise-toolbox": "^0.21.0",
    "rimraf": "^3.0.0",
    "tmp": "^0.2.1"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
@@ -1,5 +1,3 @@
'use strict'

const { createWriteStream } = require('fs')
const { PassThrough } = require('stream')

@@ -14,7 +12,7 @@ const createOutputStream = path => {
  return stream
}

exports.writeStream = function writeStream(input, path) {
export const writeStream = (input, path) => {
  const output = createOutputStream(path)

  return new Promise((resolve, reject) =>
@@ -1,13 +1,11 @@
'use strict'

const { VhdFile, checkVhdChain } = require('vhd-lib')
const getopts = require('getopts')
const { getHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')
import { VhdFile, checkVhdChain } from 'vhd-lib'
import getopts from 'getopts'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

const checkVhd = (handler, path) => new VhdFile(handler, path).readHeaderAndFooter()

module.exports = async function check(rawArgs) {
export default async rawArgs => {
  const { chain, _: args } = getopts(rawArgs, {
    boolean: ['chain'],
    default: {
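What the `check` command does boils down to the two `vhd-lib` helpers it imports; a sketch with hypothetical paths:

```js
import { getHandler } from '@xen-orchestra/fs'
import { VhdFile, checkVhdChain } from 'vhd-lib'

const handler = getHandler({ url: 'file:///' })

// validate a single VHD: reading the header and footer throws if they are invalid
await new VhdFile(handler, '/tmp/disk.vhd').readHeaderAndFooter()

// or walk and validate the whole chain of parents it depends on
await checkVhdChain(handler, '/tmp/disk.vhd')
```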
@@ -1,11 +1,9 @@
'use strict'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, Constants } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import omit from 'lodash/omit'

const { getSyncedHandler } = require('@xen-orchestra/fs')
const { openVhd, Constants } = require('vhd-lib')
const Disposable = require('promise-toolbox/Disposable')
const omit = require('lodash/omit')

function deepCompareObjects(src, dest, path) {
const deepCompareObjects = function (src, dest, path) {
  for (const key of Object.keys(src)) {
    const srcValue = src[key]
    const destValue = dest[key]
@@ -31,7 +29,7 @@ function deepCompareObjects(src, dest, path) {
  }
}

module.exports = async function compare(args) {
export default async args => {
  if (args.length < 4 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: compare <sourceRemoteUrl> <source VHD> <destionationRemoteUrl> <destination> `
  }
@@ -1,11 +1,9 @@
'use strict'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, VhdFile, VhdDirectory } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import getopts from 'getopts'

const { getSyncedHandler } = require('@xen-orchestra/fs')
const { openVhd, VhdFile, VhdDirectory } = require('vhd-lib')
const Disposable = require('promise-toolbox/Disposable')
const getopts = require('getopts')

module.exports = async function copy(rawArgs) {
export default async rawArgs => {
  const {
    directory,
    help,
@@ -1,13 +1,9 @@
'use strict'

const { Constants, VhdFile } = require('vhd-lib')
const { getHandler } = require('@xen-orchestra/fs')
const { openVhd } = require('vhd-lib/openVhd')
const { resolve } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const humanFormat = require('human-format')
const invert = require('lodash/invert.js')
const UUID = require('uuid')
import { Constants, VhdFile } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
import * as UUID from 'uuid'
import humanFormat from 'human-format'
import invert from 'lodash/invert.js'

const { PLATFORMS } = Constants

@@ -36,8 +32,8 @@ function mapProperties(object, mapping) {
  return result
}

async function showDetails(handler, path) {
  const vhd = new VhdFile(handler, resolve(path))
export default async args => {
  const vhd = new VhdFile(getHandler({ url: 'file:///' }), resolve(args[0]))

  try {
    await vhd.readHeaderAndFooter()
@@ -71,29 +67,3 @@ async function showDetails(handler, path) {
    })
  )
}

async function showList(handler, paths) {
  let previousUuid
  for (const path of paths) {
    await Disposable.use(openVhd(handler, resolve(path)), async vhd => {
      const uuid = MAPPERS.uuid(vhd.footer.uuid)
      const fields = [path, MAPPERS.bytes(vhd.footer.currentSize), uuid, MAPPERS.diskType(vhd.footer.diskType)]
      if (vhd.footer.diskType === Constants.DISK_TYPES.DIFFERENCING) {
        const parentUuid = MAPPERS.uuid(vhd.header.parentUuid)
        fields.push(parentUuid === previousUuid ? '<above VHD>' : parentUuid)
      }
      previousUuid = uuid
      console.log(fields.join(' | '))
    })
  }
}

module.exports = async function info(args) {
  const handler = getHandler({ url: 'file:///' })

  if (args.length === 1) {
    return showDetails(handler, args[0])
  }

  return showList(handler, args)
}
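The `Disposable.use` generator pattern seen in `showList` above is the common way these commands guarantee handlers and VHDs are released; condensed into a sketch (hypothetical file names):

```js
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd } from 'vhd-lib'
import { Disposable } from 'promise-toolbox'

await Disposable.use(async function* () {
  const handler = yield getSyncedHandler({ url: 'file:///tmp/vhds' })
  const vhd = yield openVhd(handler, 'disk.vhd')
  await vhd.readBlockAllocationTable()
  console.log(vhd.footer.currentSize)
  // handler and vhd are both disposed when this function returns or throws
})
```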
@@ -1,11 +1,9 @@
'use strict'
import { Bar } from 'cli-progress'
import { mergeVhd } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

const { Bar } = require('cli-progress')
const { mergeVhd } = require('vhd-lib')
const { getHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')

module.exports = async function merge(args) {
export default async function main(args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <child VHD> <parent VHD>`
  }
@@ -1,13 +1,11 @@
'use strict'
import { openVhd } from 'vhd-lib'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

const { openVhd } = require('vhd-lib')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')
import { writeStream } from '../_utils'
import { Disposable } from 'promise-toolbox'

const { writeStream } = require('../_utils')
const { Disposable } = require('promise-toolbox')

module.exports = async function raw(args) {
export default async args => {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> [<output raw>]`
  }
@@ -1,12 +1,10 @@
'use strict'
import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { relative } from 'path'
import { start as createRepl } from 'repl'
import * as vhdLib from 'vhd-lib'

const { asCallback, fromCallback, fromEvent } = require('promise-toolbox')
const { getHandler } = require('@xen-orchestra/fs')
const { relative } = require('path')
const { start: createRepl } = require('repl')
const vhdLib = require('vhd-lib')

module.exports = async function repl(args) {
export default async args => {
  const cwd = process.cwd()
  const handler = getHandler({ url: 'file://' + cwd })
  await handler.sync()
@@ -1,11 +1,9 @@
'use strict'
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'

const path = require('path')
const { createSyntheticStream } = require('vhd-lib')
const { createWriteStream } = require('fs')
const { getHandler } = require('@xen-orchestra/fs')

module.exports = async function synthetize(args) {
export default async function main(args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> <output VHD>`
  }
@@ -1,11 +1,10 @@
#!/usr/bin/env node

'use strict'
import execPromise from 'exec-promise'

const execPromise = require('exec-promise')
import pkg from '../package.json'

const pkg = require('./package.json')
const commands = require('./commands').default
import commands from './commands'

function runCommand(commands, [command, ...args]) {
  if (command === undefined || command === '-h' || command === '--help') {
@@ -275,26 +275,3 @@ it('can stream content', async () => {
    }
  })
})

it('can check vhd contained in on another', async () => {
  const rawFile = `${tempDir}/contained`
  await createRandomFile(rawFile, 4)
  const containedVhdFileName = `${tempDir}/contained.vhd`
  await convertFromRawToVhd(rawFile, containedVhdFileName)

  const after = `${tempDir}/after`
  await createRandomFile(after, 4)

  fs.appendFile(rawFile, await fs.readFile(after))

  const cnotainerVhdFileName = `${tempDir}/container.vhd`
  await convertFromRawToVhd(rawFile, cnotainerVhdFileName)

  await Disposable.use(async function* () {
    const handler = yield getSyncedHandler({ url: 'file://' + tempDir })
    const contained = yield openVhd(handler, 'contained.vhd')
    const container = yield openVhd(handler, 'container.vhd')
    expect(await contained.contains(container)).toEqual(false)
    expect(await container.contains(contained)).toEqual(true)
  })
})
@@ -104,7 +104,7 @@ exports.VhdAbstract = class VhdAbstract {
   *
   * @returns {number} the merged data size
   */
  async mergeBlock(child, blockId) {
  async coalesceBlock(child, blockId) {
    const block = await child.readBlock(blockId)
    await this.writeEntireBlock(block)
    return block.data.length
@@ -334,25 +334,4 @@ exports.VhdAbstract = class VhdAbstract {
    stream.length = footer.currentSize
    return stream
  }

  /*
   * check if all the data of a child are already contained in this vhd
   */

  async containsAllDataOf(child) {
    await this.readBlockAllocationTable()
    await child.readBlockAllocationTable()
    for await (const block of child.blocks()) {
      const { id, data: childData } = block
      // block is in child not in parent
      if (!this.containsBlock(id)) {
        return false
      }
      const { data: parentData } = await this.readBlock(id)
      if (!childData.equals(parentData)) {
        return false
      }
    }
    return true
  }
}
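`containsAllDataOf` above is the primitive for asserting that a merge lost nothing; a hedged sketch with hypothetical `parentVhd`/`childVhd` objects opened as in the tests further down:

```js
// every block of the child must exist in the parent with identical data
if (!(await parentVhd.containsAllDataOf(childVhd))) {
  throw new Error('merge left the parent incomplete')
}
```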
@@ -53,25 +53,19 @@ test('Can coalesce block', async () => {
    const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
    await childDirectoryVhd.readBlockAllocationTable()

    let childBlockData = (await childDirectoryVhd.readBlock(0)).data
    await parentVhd.mergeBlock(childDirectoryVhd, 0)
    await parentVhd.coalesceBlock(childFileVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    // block should be present in parent
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
    // block should not be in child since it's a rename for vhd directory
    await expect(childDirectoryVhd.readBlock(0)).rejects.toThrowError()

    childBlockData = (await childFileVhd.readBlock(1)).data
    await parentVhd.mergeBlock(childFileVhd, 1)
    await parentVhd.coalesceBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(1)).data
    // block should be present in parent in case of mixed vhdfile/vhddirectory
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
    // block should still be in child
    await childFileVhd.readBlock(1)
    parentBlockData = (await parentVhd.readBlock(0)).data
    childBlockData = (await childDirectoryVhd.readBlock(0)).data
    expect(parentBlockData).toEqual(childBlockData)
  })
})
@@ -117,7 +117,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
  }

  static async create(handler, path, { flags = 'wx+', compression } = {}) {
    await handler.mktree(path)
    await handler.mkdir(path)
    const vhd = new VhdDirectory(handler, path, { flags, compression })
    return {
      dispose: () => {},
@@ -142,13 +142,13 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
    return test(this.#blockTable, blockId)
  }

  #getChunkPath(partName) {
  _getChunkPath(partName) {
    return this._path + '/' + partName
  }

  async _readChunk(partName) {
    // here we can implement compression and / or crypto
    const buffer = await this._handler.readFile(this.#getChunkPath(partName))
    const buffer = await this._handler.readFile(this._getChunkPath(partName))

    const uncompressed = await this.#compressor.decompress(buffer)
    return {
@@ -163,23 +163,17 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
      `Can't write a chunk ${partName} in ${this._path} with read permission`
    )

    // in case of VhdDirectory, we want to create the file if it does not exists
    const flags = this._opts?.flags === 'r+' ? 'w' : this._opts?.flags
    const compressed = await this.#compressor.compress(buffer)
    return this._handler.outputFile(this.#getChunkPath(partName), compressed, { flags })
    return this._handler.outputFile(this._getChunkPath(partName), compressed, this._opts)
  }

  // put block in subdirectories to limit impact when doing directory listing
  #getBlockPath(blockId) {
  _getBlockPath(blockId) {
    const blockPrefix = Math.floor(blockId / 1e3)
    const blockSuffix = blockId - blockPrefix * 1e3
    return `blocks/${blockPrefix}/${blockSuffix}`
  }

  _getFullBlockPath(blockId) {
    return this.#getChunkPath(this.#getBlockPath(blockId))
  }

  async readHeaderAndFooter() {
    await this.#readChunkFilters()

@@ -206,7 +200,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
    if (onlyBitmap) {
      throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
    }
    const { buffer } = await this._readChunk(this.#getBlockPath(blockId))
    const { buffer } = await this._readChunk(this._getBlockPath(blockId))
    return {
      id: blockId,
      bitmap: buffer.slice(0, this.bitmapSize),
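As a worked illustration of the `#getBlockPath` sharding above, which keeps at most 1000 entries per directory (hypothetical block IDs):

```js
// blockId   42 → blocks/0/42
// blockId  999 → blocks/0/999
// blockId 1234 → blocks/1/234  (prefix = Math.floor(1234 / 1e3) = 1, suffix = 1234 - 1000 = 234)
```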
@@ -246,39 +240,25 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
  }

  // only works if data are in the same handler
  // and if the full block is modified in child ( which is the case with xcp)
  // and if the full block is modified in child ( which is the case whit xcp)
  // and if the compression type is same on both sides
  async mergeBlock(child, blockId, isResumingMerge = false) {
    const childBlockPath = child._getFullBlockPath?.(blockId)
  async coalesceBlock(child, blockId) {
    if (
      childBlockPath === undefined ||
      !(child instanceof VhdDirectory) ||
      this._handler !== child._handler ||
      child.compressionType !== this.compressionType ||
      child.compressionType === 'MIXED'
      child.compressionType !== this.compressionType
    ) {
      return super.mergeBlock(child, blockId)
      return super.coalesceBlock(child, blockId)
    }
    try {
      await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
    } catch (error) {
      if (error.code === 'ENOENT' && isResumingMerge === true) {
        // when resuming, the blocks moved since the last merge state write are
        // not in the child anymore but it should be ok

        // it will throw an error if block is missing in parent
        // won't detect if the block was already in parent and is broken/missing in child
        const { data } = await this.readBlock(blockId)
        assert.strictEqual(data.length, this.header.blockSize)
      } else {
        throw error
      }
    }
    setBitmap(this.#blockTable, blockId)
    await this._handler.copy(
      child._getChunkPath(child._getBlockPath(blockId)),
      this._getChunkPath(this._getBlockPath(blockId))
    )
    return sectorsToBytes(this.sectorsPerBlock)
  }

  async writeEntireBlock(block) {
    await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
    await this._writeChunk(this._getBlockPath(block.id), block.buffer)
    setBitmap(this.#blockTable, block.id)
  }
@@ -222,14 +222,14 @@ test('Can coalesce block', async () => {
    const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
    await childDirectoryVhd.readBlockAllocationTable()

    await parentVhd.mergeBlock(childFileVhd, 0)
    await parentVhd.coalesceBlock(childFileVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData).toEqual(childBlockData)

    await parentVhd.mergeBlock(childDirectoryVhd, 0)
    await parentVhd.coalesceBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(0)).data
@@ -43,16 +43,6 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
    }
  }

  get compressionType() {
    const compressionType = this.vhds[0].compressionType
    for (let i = 0; i < this.vhds.length; i++) {
      if (compressionType !== this.vhds[i].compressionType) {
        return 'MIXED'
      }
    }
    return compressionType
  }

  /**
   * @param {Array<VhdAbstract>} vhds the chain of Vhds used to compute this Vhd, from the deepest child (in position 0), to the root (in the last position)
   * only the last one can have any type. Other must have type DISK_TYPES.DIFFERENCING (delta)
@@ -84,33 +74,17 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
    }
  }

  #getVhdWithBlock(blockId) {
  async readBlock(blockId, onlyBitmap = false) {
    const index = this.#vhds.findIndex(vhd => vhd.containsBlock(blockId))
    assert(index !== -1, `no such block ${blockId}`)
    return this.#vhds[index]
  }

  async readBlock(blockId, onlyBitmap = false) {
    // only read the content of the first vhd containing this block
    return await this.#getVhdWithBlock(blockId).readBlock(blockId, onlyBitmap)
  }

  async mergeBlock(child, blockId) {
    throw new Error(`can't coalesce block into a vhd synthetic`)
    return await this.#vhds[index].readBlock(blockId, onlyBitmap)
  }

  _readParentLocatorData(id) {
    return this.#vhds[this.#vhds.length - 1]._readParentLocatorData(id)
  }
  _getFullBlockPath(blockId) {
    const vhd = this.#getVhdWithBlock(blockId)
    return vhd?._getFullBlockPath(blockId)
  }

  // return true if all the vhds ar an instance of cls
  checkVhdsClass(cls) {
    return this.#vhds.every(vhd => vhd instanceof cls)
  }
}

// add decorated static method
@@ -8,7 +8,7 @@ const { Disposable } = require('promise-toolbox')

module.exports = async function chain(parentHandler, parentPath, childHandler, childPath, force = false) {
  await Disposable.use(
    [openVhd(parentHandler, parentPath), openVhd(childHandler, childPath, { flags: 'r+' })],
    [openVhd(parentHandler, parentPath), openVhd(childHandler, childPath)],
    async ([parentVhd, childVhd]) => {
      await childVhd.readHeaderAndFooter()
      const { header, footer } = childVhd
@@ -1,13 +1,10 @@
'use strict'

const { createLogger } = require('@xen-orchestra/log')
const { parseVhdStream } = require('./parseVhdStream.js')
const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')

const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')

const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
  const vhd = yield VhdDirectory.create(handler, path, { compression })
  await asyncEach(
@@ -53,7 +50,7 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
    }
  } catch (error) {
    // cleanup on error
    await handler.rmtree(path).catch(warn)
    await handler.rmtree(path)
    throw error
  }
}
@@ -6,8 +6,7 @@ exports.checkVhdChain = require('./checkChain')
exports.createReadableSparseStream = require('./createReadableSparseStream')
exports.createVhdStreamWithLength = require('./createVhdStreamWithLength')
exports.createVhdDirectoryFromStream = require('./createVhdDirectoryFromStream').createVhdDirectoryFromStream
const { mergeVhd } = require('./merge')
exports.mergeVhd = mergeVhd
exports.mergeVhd = require('./merge')
exports.peekFooterFromVhdStream = require('./peekFooterFromVhdStream')
exports.openVhd = require('./openVhd').openVhd
exports.VhdAbstract = require('./Vhd/VhdAbstract').VhdAbstract
@@ -9,7 +9,6 @@ const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')

const { VhdFile, chainVhd, mergeVhd } = require('./index')
const { _cleanupVhds: cleanupVhds } = require('./merge')

const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
@@ -39,15 +38,14 @@ test('merge works in normal cases', async () => {
  await createRandomFile(`${tempDir}/${childRandomFileName}`, mbOfChildren)
  await convertFromRawToVhd(`${tempDir}/${childRandomFileName}`, `${tempDir}/${child1FileName}`)
  await chainVhd(handler, parentFileName, handler, child1FileName, true)
  await checkFile(`${tempDir}/${parentFileName}`)

  // merge
  await mergeVhd(handler, parentFileName, handler, child1FileName)

  // check that the merged vhd is still valid
  await checkFile(`${tempDir}/${child1FileName}`)
  // check that vhd is still valid
  await checkFile(`${tempDir}/${parentFileName}`)

  const parentVhd = new VhdFile(handler, child1FileName)
  const parentVhd = new VhdFile(handler, parentFileName)
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()
@@ -140,11 +138,11 @@ test('it can resume a merge ', async () => {
  await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')

  // reload header, footer and block allocation table, they should succeed
  await childVhd.readHeaderAndFooter()
  await childVhd.readBlockAllocationTable()
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()
  let offset = 0
  // check that the data are the same as source
  for await (const block of childVhd.blocks()) {
  for await (const block of parentVhd.blocks()) {
    const blockContent = block.data
    // first block is marked as already merged, should not be modified
    // second block should come from children
@@ -155,7 +153,7 @@ test('it can resume a merge ', async () => {
    await fs.read(fd, buffer, 0, buffer.length, offset)

    expect(buffer.equals(blockContent)).toEqual(true)
    offset += childVhd.header.blockSize
    offset += parentVhd.header.blockSize
  }
})
@@ -185,9 +183,9 @@ test('it merge multiple child in one pass ', async () => {
  await mergeVhd(handler, parentFileName, handler, [grandChildFileName, childFileName])

  // check that vhd is still valid
  await checkFile(grandChildFileName)
  await checkFile(parentFileName)

  const parentVhd = new VhdFile(handler, grandChildFileName)
  const parentVhd = new VhdFile(handler, parentFileName)
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()

@@ -208,21 +206,3 @@ test('it merge multiple child in one pass ', async () => {
    offset += parentVhd.header.blockSize
  }
})
test('it cleans vhd mergedfiles', async () => {
  const handler = getHandler({ url: `file://${tempDir}` })

  await handler.writeFile('parent', 'parentData')
  await handler.writeFile('child1', 'child1Data')
  await handler.writeFile('child2', 'child2Data')
  await handler.writeFile('child3', 'child3Data')

  // childPath is from the grand children to the children
  await cleanupVhds(handler, 'parent', ['child3', 'child2', 'child1'], { remove: true })

  // only child3 should stay, with the data of parent
  const [child3, ...other] = await handler.list('.')
  expect(other.length).toEqual(0)
  expect(child3).toEqual('child3')
  expect((await handler.readFile('child3')).toString('utf8')).toEqual('parentData')
})
@@ -4,7 +4,6 @@

const assert = require('assert')
const noop = require('./_noop')
const UUID = require('uuid')
const { createLogger } = require('@xen-orchestra/log')
const { limitConcurrency } = require('limit-concurrency-decorator')

@@ -13,34 +12,11 @@ const { basename, dirname } = require('path')
const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const { VhdAbstract } = require('./Vhd/VhdAbstract')
const { VhdDirectory } = require('./Vhd/VhdDirectory')
const { VhdSynthetic } = require('./Vhd/VhdSynthetic')
const { asyncMap } = require('@xen-orchestra/async-map')

const { warn } = createLogger('vhd-lib:merge')
// The chain we want to merge is [ ancestor, child_1, ..., child_n ]
//
// 1. Create a VhdSynthetic from all children if more than 1 child
// 2. Merge the resulting VHD into the ancestor
//    2.a if at least one is a file: copy file part from child to parent
//    2.b if they are all VhdDirectory: move blocks from children to the ancestor
// 3. Update the size, UUID and timestamp of the ancestor with those of child_n
// 4. Delete all (now) unused VHDs
// 5. Rename the ancestor to child_n
//
//                     VhdSynthetic
//                          |
//             /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child_1, ..., child_n-1, child_n ]
//      |       \____________________/      ^
//      |                 |                 |
//      |            unused VHDs            |
//      |                                   |
//      \_____________rename________________/

// write the merge progress file at most every `delay` seconds
function makeThrottledWriter(handler, path, delay) {
  let lastWrite = Date.now()
  return async json => {
@@ -52,45 +28,21 @@
  }
}

// make the rename / delete part of the merge process
// will fail if parent and children are in different remotes

function cleanupVhds(handler, parent, children, { logInfo = noop, remove = false } = {}) {
  if (!Array.isArray(children)) {
    children = [children]
  }
  const mergeTargetChild = children.shift()

  return Promise.all([
    VhdAbstract.rename(handler, parent, mergeTargetChild),
    asyncMap(children, child => {
      logInfo(`the VHD child is already merged`, { child })
      if (remove) {
        logInfo(`deleting merged VHD child`, { child })
        return VhdAbstract.unlink(handler, child)
      }
    }),
  ])
}
module.exports._cleanupVhds = cleanupVhds
// Merge one or multiple VHD children into the VHD parent.
// childPath can be an array, to create a synthetic VHD from multiple VHDs
// childPath goes from the grandchildren to the direct children
//
// TODO: rename the VHD file during the merge
module.exports.mergeVhd = limitConcurrency(2)(async function merge(
module.exports = limitConcurrency(2)(async function merge(
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  { onProgress = noop, logInfo = noop, remove } = {}
  { onProgress = noop } = {}
) {
  const mergeStatePath = dirname(parentPath) + '/' + '.' + basename(parentPath) + '.merge.json'
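  // e.g. for parentPath 'vm1/disk.vhd', the state file is 'vm1/.disk.vhd.merge.json'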

  return await Disposable.use(async function* () {
    let mergeState
    let isResuming = false
    try {
      const mergeStateContent = await parentHandler.readFile(mergeStatePath)
      mergeState = JSON.parse(mergeStateContent)
@@ -107,26 +59,22 @@ module.exports.mergeVhd = limitConcurrency(2)(async function merge(
      checkSecondFooter: mergeState === undefined,
    })
    let childVhd
    const parentIsVhdDirectory = parentVhd instanceof VhdDirectory
    let childIsVhdDirectory
    if (Array.isArray(childPath)) {
      childVhd = yield VhdSynthetic.open(childHandler, childPath)
      childIsVhdDirectory = childVhd.checkVhdsClass(VhdDirectory)
    } else {
      childVhd = yield openVhd(childHandler, childPath)
      childIsVhdDirectory = childVhd instanceof VhdDirectory
    }

    const concurrency = parentIsVhdDirectory && childIsVhdDirectory ? 16 : 1
    const concurrency = childVhd instanceof VhdDirectory ? 16 : 1

    if (mergeState === undefined) {
      // the merge must follow a VHD chain
      assert.strictEqual(UUID.stringify(childVhd.header.parentUuid), UUID.stringify(parentVhd.footer.uuid))
      assert.strictEqual(childVhd.header.parentUuid.equals(parentVhd.footer.uuid), true)
      const parentDiskType = parentVhd.footer.diskType
      assert(parentDiskType === DISK_TYPES.DIFFERENCING || parentDiskType === DISK_TYPES.DYNAMIC)
      assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
      assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
    } else {
      isResuming = true
      // the VHDs must not have changed for the merge to resume
      assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
      assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
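The resume checks above pin the state file to specific header checksums. From the fields they read, the on-disk merge state looks roughly like the sketch below; any field or value not visible in this diff is an assumption:

```js
// Hypothetical shape of the '.<name>.merge.json' file, inferred only from the
// assertions above; the checksum values are placeholders.
const mergeState = {
  parent: { header: 0x12345678 }, // parentVhd.header.checksum when the merge started
  child: { header: 0x9abcdef0 }, // childVhd.header.checksum when the merge started
  mergedDataSize: 0, // bytes merged so far, incremented per merged block
}
```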
@@ -167,12 +115,12 @@ module.exports.mergeVhd = limitConcurrency(2)(async function merge(
    let counter = 0

    const mergeStateWriter = makeThrottledWriter(parentHandler, mergeStatePath, 10e3)

    await asyncEach(
      toMerge,
      async blockId => {
        merging.add(blockId)
        mergeState.mergedDataSize += await parentVhd.mergeBlock(childVhd, blockId, isResuming)

        mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        merging.delete(blockId)

        onProgress({
@@ -207,8 +155,6 @@ module.exports.mergeVhd = limitConcurrency(2)(async function merge(
    // should be a disposable
    parentHandler.unlink(mergeStatePath).catch(warn)

    await cleanupVhds(parentHandler, parentPath, childPath, { logInfo, remove })

    return mergeState.mergedDataSize
  })
})
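Putting the pieces together, a hedged usage sketch of the merge entry point, on the side of the diff where it is exported as `mergeVhd`; `getSyncedHandler`, the option names and the `onProgress` payload are assumptions based on this diff and the surrounding packages, not confirmed API:

```js
// Sketch: merge two children into their parent on a local remote, then let
// cleanupVhds rename the parent and delete the now-unused children.
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable } = require('promise-toolbox')
const { mergeVhd } = require('vhd-lib/merge')

Disposable.use(getSyncedHandler({ url: 'file:///vhds' }), async handler => {
  const mergedDataSize = await mergeVhd(
    handler,
    'parent.vhd',
    handler,
    ['grandchild.vhd', 'child.vhd'], // from grandchildren to direct children
    { onProgress: p => console.log(p), remove: true }
  )
  console.log(`${mergedDataSize} bytes merged`)
})
```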
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "vhd-lib",
  "version": "3.3.1",
  "version": "3.1.0",
  "license": "AGPL-3.0-or-later",
  "description": "Primitives for VHD file handling",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -16,7 +16,7 @@
  },
  "dependencies": {
    "@vates/async-each": "^0.1.0",
    "@vates/read-chunk": "^1.0.0",
    "@vates/read-chunk": "^0.1.2",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.3.0",
    "async-iterator-to-stream": "^1.0.2",
@@ -29,7 +29,7 @@
    "uuid": "^8.3.1"
  },
  "devDependencies": {
    "@xen-orchestra/fs": "^1.1.0",
    "@xen-orchestra/fs": "^1.0.1",
    "execa": "^5.0.0",
    "get-stream": "^6.0.0",
    "rimraf": "^3.0.2",
@@ -3,10 +3,11 @@
const { readChunk } = require('@vates/read-chunk')

const { FOOTER_SIZE } = require('./_constants')
const { unpackFooter } = require('./Vhd/_utils.js')
const { fuFooter } = require('./_structs')

module.exports = async function peekFooterFromStream(stream) {
  const buffer = await readChunk(stream, FOOTER_SIZE)
  stream.unshift(buffer)
  return unpackFooter(buffer)
  const footerBuffer = await readChunk(stream, FOOTER_SIZE)
  const footer = fuFooter.unpack(footerBuffer)
  stream.unshift(footerBuffer)
  return footer
}
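Both versions implement the same peek: read the first `FOOTER_SIZE` bytes, push them back with `stream.unshift`, and return the parsed footer, so later consumers still see the complete stream. A minimal standalone illustration of that pattern (`peek` is an illustrative name, not part of the library):

```js
// Read `size` bytes from a stream, then un-consume them so downstream
// readers still receive the full stream from the beginning.
const { readChunk } = require('@vates/read-chunk')

async function peek(stream, size) {
  const buffer = await readChunk(stream, size)
  if (buffer !== null) {
    stream.unshift(buffer) // put the bytes back at the front of the stream
  }
  return buffer
}
```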
@@ -85,9 +85,10 @@ async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
  await fs.mkdir(path + '/blocks/0/')
  const stats = await fs.stat(rawFileName)

  for (let i = 0, offset = 0; offset < stats.size; i++, offset += blockDataSize) {
  const sizeMB = stats.size / 1024 / 1024
  for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) {
    const blockData = Buffer.alloc(blockDataSize)
    await fs.read(srcRaw, blockData, 0, blockData.length, offset)
    await fs.read(srcRaw, blockData, offset)
    await fs.writeFile(path + '/blocks/0/' + i, Buffer.concat([bitmap, blockData]))
  }
  await fs.close(srcRaw)
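The two loop bounds only agree when `blockDataSize` is exactly 1 MiB; with the 2 MiB data blocks typical of VHDs (an assumption here, the value is not shown in this hunk), the `i < sizeMB` variant keeps iterating past the end of the file. A quick arithmetic check:

```js
// Compare the two loop bounds for a 10 MiB raw file with 2 MiB data blocks.
const size = 10 * 1024 * 1024
const blockDataSize = 2 * 1024 * 1024

let byOffset = 0
for (let i = 0, offset = 0; offset < size; i++, offset += blockDataSize) byOffset++
console.log(byOffset) // 5 — exactly the blocks that exist in the file

const sizeMB = size / 1024 / 1024
let byMiB = 0
for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) byMiB++
console.log(byMiB) // 10 — the last 5 iterations read past EOF
```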
@@ -40,7 +40,7 @@
    "human-format": "^1.0.0",
    "lodash": "^4.17.4",
    "pw": "^0.0.4",
    "xen-api": "^1.2.1"
    "xen-api": "^1.2.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.1.5",
@@ -8,6 +8,6 @@
    "promise-toolbox": "^0.19.2",
    "readable-stream": "^3.1.1",
    "throttle": "^1.0.3",
    "vhd-lib": "^3.3.1"
    "vhd-lib": "^3.1.0"
  }
}
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "xen-api",
  "version": "1.2.1",
  "version": "1.2.0",
  "license": "ISC",
  "description": "Connector to the Xen API",
  "keywords": [
@@ -465,8 +465,6 @@ export class Xapi extends EventEmitter {
    await this._setHostAddressInUrl(url, host)

    const doRequest = httpRequest.put.bind(undefined, $cancelToken, {
      agent: this.httpAgent,

      body,
      headers,
      rejectUnauthorized: !this._allowUnauthorized,
@@ -488,6 +486,7 @@ export class Xapi extends EventEmitter {
      query: 'task_id' in query ? omit(query, 'task_id') : query,

      maxRedirects: 0,
      agent: this.httpAgent,
    }).then(
      response => {
        response.cancel()
@@ -1,12 +1,12 @@
import { ensureDir as mkdirp } from 'fs-extra'
import { readFile, writeFile } from 'fs/promises'
import { xdgConfig } from 'xdg-basedir'
import lodashGet from 'lodash/get.js'
import lodashUnset from 'lodash/unset.js'
import xdgBasedir from 'xdg-basedir'

// ===================================================================

const configPath = xdgConfig + '/xo-cli'
const configPath = xdgBasedir.config + '/xo-cli'
const configFile = configPath + '/config.json'

// ===================================================================
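This hunk tracks the xdg-basedir major bump visible in xo-cli's package.json below (^5.1.0 on one side, ^4.0.0 on the other): v5 of that package is pure ESM and replaced the default-exported object with named exports (a fact about xdg-basedir itself, not shown in this diff):

```js
// xdg-basedir v5: named export
import { xdgConfig } from 'xdg-basedir'
console.log(xdgConfig + '/xo-cli') // e.g. /home/user/.config/xo-cli

// xdg-basedir v4 equivalent: default-exported object
// import xdgBasedir from 'xdg-basedir'
// console.log(xdgBasedir.config + '/xo-cli')
```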
@@ -75,23 +75,13 @@ async function parseRegisterArgs(args) {
  }
}

async function _createToken({ allowUnauthorized, description, email, expiresIn, password, url }) {
async function _createToken({ allowUnauthorized, email, expiresIn, password, url }) {
  const xo = new Xo({ rejectUnauthorized: !allowUnauthorized, url })
  await xo.open()
  try {
    await xo.signIn({ email, password })
    console.warn('Successfully logged with', xo.user.email)
  await xo.signIn({ email, password })
  console.warn('Successfully logged with', xo.user.email)

    return await xo.call('token.create', { description, expiresIn }).catch(error => {
      // on an invalid-parameter error, retry without description for backward compatibility
      if (error.code === 10) {
        return xo.call('token.create', { expiresIn })
      }
      throw error
    })
  } finally {
    await xo.close()
  }
  return await xo.call('token.create', { expiresIn })
}
function createOutputStream(path) {
@@ -282,10 +272,7 @@ function main(args) {
COMMANDS.help = help

async function createToken(args) {
  const opts = await parseRegisterArgs(args)
  opts.description = 'xo-cli --createToken'

  const token = await _createToken(opts)
  const token = await _createToken(await parseRegisterArgs(args))
  console.warn('Authentication token created')
  console.warn()
  console.log(token)
@@ -294,7 +281,6 @@ COMMANDS.createToken = createToken

async function register(args) {
  const opts = await parseRegisterArgs(args)
  opts.description = 'xo-cli --register'

  await config.set({
    allowUnauthorized: opts.allowUnauthorized,
@@ -411,67 +397,64 @@ async function call(args) {
  delete params['@']

  const xo = await connect()
  try {
    // FIXME: do not use private properties.
    const baseUrl = xo._url.replace(/^ws/, 'http')
    const httpOptions = {
      rejectUnauthorized: !(await config.load()).allowUnauthorized,

  // FIXME: do not use private properties.
  const baseUrl = xo._url.replace(/^ws/, 'http')
  const httpOptions = {
    rejectUnauthorized: !(await config.load()).allowUnauthorized,
  }

    const result = await xo.call(method, params)
    let keys, key, url
    if (isObject(result) && (keys = getKeys(result)).length === 1) {
      key = keys[0]

      if (key === '$getFrom') {
        ensurePathParam(method, file)
        url = new URL(result[key], baseUrl)
        const output = createOutputStream(file)
        const response = await hrp(url, httpOptions)

        const progress = progressStream(
          {
            length: response.headers['content-length'],
            time: 1e3,
          },
          printProgress
        )

        return fromCallback(pipeline, response, progress, output)
      }

  const result = await xo.call(method, params)
  let keys, key, url
  if (isObject(result) && (keys = getKeys(result)).length === 1) {
    key = keys[0]
    if (key === '$sendTo') {
      ensurePathParam(method, file)
      url = new URL(result[key], baseUrl)

    if (key === '$getFrom') {
      ensurePathParam(method, file)
      url = new URL(result[key], baseUrl)
      const output = createOutputStream(file)
      const response = await hrp(url, httpOptions)

      const progress = progressStream(
      const { size: length } = await stat(file)
      const input = pipeline(
        createReadStream(file),
        progressStream(
          {
            length: response.headers['content-length'],
            length,
            time: 1e3,
          },
          printProgress
        )
        ),
        noop
      )

      return fromCallback(pipeline, response, progress, output)
    }

      if (key === '$sendTo') {
        ensurePathParam(method, file)
        url = new URL(result[key], baseUrl)

        const { size: length } = await stat(file)
        const input = pipeline(
          createReadStream(file),
          progressStream(
            {
              length,
              time: 1e3,
            },
            printProgress
          ),
          noop
        )

        return hrp
          .post(url, httpOptions, {
            body: input,
            headers: {
              'content-length': length,
            },
          })
          .readAll('utf-8')
      }
      return hrp
        .post(url, httpOptions, {
          body: input,
          headers: {
            'content-length': length,
          },
        })
        .readAll('utf-8')
    }

    return result
  } finally {
    await xo.close()
  }

  return result
}
COMMANDS.call = call
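Both the download and upload branches above wrap a file stream in progress-stream inside a pipeline; a self-contained sketch of that pattern (the function name and output target are illustrative, not from xo-cli):

```js
// Stream a file into `output` while printing percentage progress once per second.
const { createReadStream } = require('fs')
const { stat } = require('fs/promises')
const { pipeline } = require('stream')
const progressStream = require('progress-stream')

async function copyWithProgress(path, output) {
  const { size: length } = await stat(path)
  const progress = progressStream({ length, time: 1e3 }, p =>
    console.warn(`${Math.round(p.percentage)}%`)
  )
  return new Promise((resolve, reject) => {
    pipeline(createReadStream(path), progress, output, err => (err ? reject(err) : resolve()))
  })
}
```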
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "xo-cli",
  "version": "0.14.0",
  "version": "0.13.0",
  "license": "AGPL-3.0-or-later",
  "description": "Basic CLI for Xen-Orchestra",
  "keywords": [
@@ -29,7 +29,7 @@
    "node": ">=14.13"
  },
  "dependencies": {
    "chalk": "^5.0.1",
    "chalk": "^4.1.0",
    "exec-promise": "^0.7.0",
    "fs-extra": "^10.0.0",
    "getopts": "^2.3.0",
@@ -37,11 +37,11 @@
    "human-format": "^1.0.0",
    "lodash": "^4.17.4",
    "micromatch": "^4.0.2",
    "pretty-ms": "^8.0.0",
    "pretty-ms": "^7.0.0",
    "progress-stream": "^2.0.0",
    "promise-toolbox": "^0.21.0",
    "pw": "^0.0.4",
    "xdg-basedir": "^5.1.0",
    "xdg-basedir": "^4.0.0",
    "xo-lib": "^0.11.1"
  },
  "scripts": {
3
packages/xo-common/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
Some files were not shown because too many files have changed in this diff.